Publications

2025

  • P. M. Blok, F. Magistri, C. Stachniss, H. Wang, J. Burridge, and W. Guo, “High-Throughput 3D Shape Completion of Potato Tubers on a Harvester,” Computers and Electronics in Agriculture, vol. 228, p. 109673, 2025. doi:https://doi.org/10.1016/j.compag.2024.109673
    [BibTeX] [PDF]
    @article{blok2025cea,
    author  = {P.M. Blok and F. Magistri and C. Stachniss and H. Wang and J. Burridge and W. Guo},
    title   = {{High-Throughput 3D Shape Completion of Potato Tubers on a Harvester}},
    journal = cea,
    year    = 2025,
    volume  = {228},
    pages   = {109673},
    doi     = {10.1016/j.compag.2024.109673},
    }

2024

  • M. Zeller, D. Casado Herraez, B. Ayan, J. Behley, M. Heidingsfeld, and C. Stachniss, “SemRaFiner: Panoptic Segmentation in Sparse and Noisy Radar Point Clouds,” IEEE Robotics and Automation Letters (RA-L), 2024. doi:10.1109/LRA.2024.3502058
    [BibTeX] [PDF]
    @article{zeller2024ral,
    author  = {M. Zeller and Casado Herraez, D. and B. Ayan and J. Behley and M. Heidingsfeld and C. Stachniss},
    title   = {{SemRaFiner: Panoptic Segmentation in Sparse and Noisy Radar Point Clouds}},
    journal = ral,
    year    = {2024},
    issn    = {2377-3766},
    doi     = {10.1109/LRA.2024.3502058},
    }

  • L. Wiesmann, T. Läbe, L. Nunes, J. Behley, and C. Stachniss, “Joint Intrinsic and Extrinsic Calibration of Perception Systems Utilizing a Calibration Environment,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 10, pp. 9103-9110, 2024. doi:10.1109/LRA.2024.3457385
    [BibTeX] [PDF]
    @article{wiesmann2024ral,
    author  = {L. Wiesmann and T. L\"abe and L. Nunes and J. Behley and C. Stachniss},
    title   = {{Joint Intrinsic and Extrinsic Calibration of Perception Systems Utilizing a Calibration Environment}},
    journal = ral,
    year    = {2024},
    volume  = {9},
    number  = {10},
    pages   = {9103--9110},
    issn    = {2377-3766},
    doi     = {10.1109/LRA.2024.3457385},
    }

  • C. Witte, J. Behley, C. Stachniss, and M. Raaijmakers, “Epipolar Attention Field Transformers for Bird’s Eye View Semantic Segmentation,” arXiv Preprint, vol. arXiv:2412.01595, 2024.
    [BibTeX] [PDF]

    Spatial understanding of the semantics of the surroundings is a key capability needed by autonomous cars to enable safe driving decisions. Recently, purely vision-based solutions have gained increasing research interest. In particular, approaches extracting a bird’s eye view (BEV) from multiple cameras have demonstrated great performance for spatial understanding. This paper addresses the dependency on learned positional encodings to correlate image and BEV feature map elements for transformer-based methods. We propose leveraging epipolar geometric constraints to model the relationship between cameras and the BEV by Epipolar Attention Fields. They are incorporated into the attention mechanism as a novel attribution term, serving as an alternative to learned positional encodings. Experiments show that our method EAFormer outperforms previous BEV approaches by 2\% mIoU for map semantic segmentation and exhibits superior generalization capabilities compared to implicitly learning the camera configuration.

    @article{witte2024arxiv,
    author   = {C. Witte and J. Behley and C. Stachniss and M. Raaijmakers},
    title    = {{Epipolar Attention Field Transformers for Bird's Eye View Semantic Segmentation}},
    journal  = arxiv,
    year     = 2024,
    volume   = {arXiv:2412.01595},
    url      = {http://arxiv.org/pdf/2412.01595v1},
    abstract = {Spatial understanding of the semantics of the surroundings is a key capability needed by autonomous cars to enable safe driving decisions. Recently, purely vision-based solutions have gained increasing research interest. In particular, approaches extracting a bird's eye view (BEV) from multiple cameras have demonstrated great performance for spatial understanding. This paper addresses the dependency on learned positional encodings to correlate image and BEV feature map elements for transformer-based methods. We propose leveraging epipolar geometric constraints to model the relationship between cameras and the BEV by Epipolar Attention Fields. They are incorporated into the attention mechanism as a novel attribution term, serving as an alternative to learned positional encodings. Experiments show that our method EAFormer outperforms previous BEV approaches by 2\% mIoU for map semantic segmentation and exhibits superior generalization capabilities compared to implicitly learning the camera configuration.},
    }

  • T. Guadagnino, B. Mersch, I. Vizzo, S. Gupta, M. V. R. Malladi, L. Lobefaro, G. Doisy, and C. Stachniss, “Kinematic-ICP: Enhancing LiDAR Odometry with Kinematic Constraints for Wheeled Mobile Robots Moving on Planar Surfaces,” arXiv Preprint, vol. arXiv:2410.10277, 2024.
    [BibTeX] [PDF] [Code]
    @article{guadagnino2024arxiv,
    author  = {Guadagnino, T. and Mersch, B. and Vizzo, I. and Gupta, S. and Malladi, M.V.R. and Lobefaro, L. and Doisy, G. and Stachniss, C.},
    title   = {{Kinematic-ICP: Enhancing LiDAR Odometry with Kinematic Constraints for Wheeled Mobile Robots Moving on Planar Surfaces}},
    journal = arxiv,
    year    = {2024},
    volume  = {arXiv:2410.10277},
    url     = {https://arxiv.org/pdf/2410.10277},
    codeurl = {https://github.com/PRBonn/kinematic-icp},
    }

  • F. Magistri, T. Läbe, E. Marks, S. Nagulavancha, Y. Pan, C. Smitt, L. Klingbeil, M. Halstead, H. Kuhlmann, C. McCool, J. Behley, and C. Stachniss, “A Dataset and Benchmark for Shape Completion of Fruits for Agricultural Robotics,” arXiv Preprint, 2024.
    [BibTeX] [PDF]
    @article{magistri2024arxiv,
    author     = {F. Magistri and T. L\"abe and E. Marks and S. Nagulavancha and Y. Pan and C. Smitt and L. Klingbeil and M. Halstead and H. Kuhlmann and C. McCool and J. Behley and C. Stachniss},
    title      = {{A Dataset and Benchmark for Shape Completion of Fruits for Agricultural Robotics}},
    journal    = arxiv,
    year       = 2024,
    eprint     = {2407.13304},
    eprinttype = {arXiv},
    }

  • M. Sodano, F. Magistri, J. Behley, and C. Stachniss, “Open-World Panoptic Segmentation,” arXiv Preprint, vol. arXiv:2412.12740, 2024.
    [BibTeX] [PDF]
    @article{sodano2024arxiv,
    author  = {M. Sodano and F. Magistri and J. Behley and C. Stachniss},
    title   = {{Open-World Panoptic Segmentation}},
    journal = arxiv,
    year    = 2024,
    volume  = {arXiv:2412.12740},
    url     = {http://arxiv.org/pdf/2412.12740.pdf},
    }

  • P. M. Blok, F. Magistri, C. Stachniss, H. Wang, J. Burridge, and W. Guo, “High-Throughput 3D Shape Completion of Potato Tubers on a Harvester,” arXiv Preprint, vol. arXiv:2407.21341, 2024.
    [BibTeX] [PDF]
    @article{blok2024arxiv,
    author  = {P.M. Blok and F. Magistri and C. Stachniss and H. Wang and J. Burridge and W. Guo},
    title   = {{High-Throughput 3D Shape Completion of Potato Tubers on a Harvester}},
    journal = arxiv,
    year    = 2024,
    volume  = {arXiv:2407.21341},
    url     = {http://arxiv.org/pdf/2407.21341v1},
    }

  • Y. Pan, X. Zhong, L. Wiesmann, T. Posewsky, J. Behley, and C. Stachniss, “PIN-SLAM: LiDAR SLAM Using a Point-Based Implicit Neural Representation for Achieving Global Map Consistency,” IEEE Trans. on Robotics (TRO), vol. 40, pp. 4045-4064, 2024. doi:10.1109/TRO.2024.3422055
    [BibTeX] [PDF] [Code]
    @article{pan2024tro,
    author  = {Y. Pan and X. Zhong and L. Wiesmann and T. Posewsky and J. Behley and C. Stachniss},
    title   = {{PIN-SLAM: LiDAR SLAM Using a Point-Based Implicit Neural Representation for Achieving Global Map Consistency}},
    journal = tro,
    year    = {2024},
    volume  = {40},
    pages   = {4045-4064},
    doi     = {10.1109/TRO.2024.3422055},
    codeurl = {https://github.com/PRBonn/PIN_SLAM},
    }

  • J. Weyler, F. Magistri, E. Marks, Y. L. Chong, M. Sodano, G. Roggiolani, N. Chebrolu, C. Stachniss, and J. Behley, “PhenoBench: A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain,” IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 2024. doi:10.1109/TPAMI.2024.3419548
    [BibTeX] [PDF] [Code]
    @article{weyler2024tpami,
    author  = {J. Weyler and F. Magistri and E. Marks and Y.L. Chong and M. Sodano and G. Roggiolani and N. Chebrolu and C. Stachniss and J. Behley},
    title   = {{PhenoBench: A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain}},
    journal = tpami,
    year    = {2024},
    doi     = {10.1109/TPAMI.2024.3419548},
    codeurl = {https://github.com/PRBonn/phenobench},
    }

  • D. Casado Herraez, L. Chang, M. Zeller, L. Wiesmann, J. Behley, M. Heidingsfeld, and C. Stachniss, “SPR: Single-Scan Radar Place Recognition,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 10, pp. 9079-9086, 2024.
    [BibTeX] [PDF]
    @article{casado-herraez2024ral,
    author  = {Casado Herraez, D. and L. Chang and M. Zeller and L. Wiesmann and J. Behley and M. Heidingsfeld and C. Stachniss},
    title   = {{SPR: Single-Scan Radar Place Recognition}},
    journal = ral,
    year    = {2024},
    volume  = {9},
    number  = {10},
    pages   = {9079--9086},
    }

  • A. Vashisth, J. Rückin, F. Magistri, C. Stachniss, and M. Popović, “Deep Reinforcement Learning with Dynamic Graphs for Adaptive Informative Path Planning,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 9, pp. 7747-7754, 2024. doi:10.1109/LRA.2024.3421188
    [BibTeX] [PDF] [Code]
    @article{vashisth2024ral,
    author  = {A. Vashisth and J. R\"uckin and F. Magistri and C. Stachniss and M. Popovi\'c},
    title   = {{Deep Reinforcement Learning with Dynamic Graphs for Adaptive Informative Path Planning}},
    journal = ral,
    volume  = {9},
    number  = {9},
    pages   = {7747--7754},
    year    = 2024,
    doi     = {10.1109/LRA.2024.3421188},
    codeurl = {https://github.com/dmar-bonn/ipp-rl-3d},
    }

  • F. Magistri, Y. Pan, J. Bartels, J. Behley, C. Stachniss, and C. Lehnert, “Improving Robotic Fruit Harvesting Within Cluttered Environments Through 3D Shape Completion,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 8, p. 7357–7364, 2024. doi:10.1109/LRA.2024.3421788
    [BibTeX] [PDF]
    @article{magistri2024ral,
    author  = {F. Magistri and Y. Pan and J. Bartels and J. Behley and C. Stachniss and C. Lehnert},
    title   = {{Improving Robotic Fruit Harvesting Within Cluttered Environments Through 3D Shape Completion}},
    journal = ral,
    volume  = {9},
    number  = {8},
    pages   = {7357--7364},
    year    = 2024,
    doi     = {10.1109/LRA.2024.3421788},
    }

  • I. B. Opra, B. Le Dem, J. Walls, D. Lukarski, and C. Stachniss, “Leveraging GNSS and Onboard Visual Data from Consumer Vehicles for Robust Road Network Estimation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{opra2024iros,
    author    = {I.B. Opra and Le Dem, B. and J. Walls and D. Lukarski and C. Stachniss},
    title     = {{Leveraging GNSS and Onboard Visual Data from Consumer Vehicles for Robust Road Network Estimation}},
    booktitle = iros,
    year      = 2024,
    }

  • L. Lobefaro, M. V. R. Malladi, T. Guadagnino, and C. Stachniss, “Spatio-Temporal Consistent Mapping of Growing Plants for Agricultural Robots in the Wild,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{lobefaro2024iros,
    author    = {L. Lobefaro and M.V.R. Malladi and T. Guadagnino and C. Stachniss},
    title     = {{Spatio-Temporal Consistent Mapping of Growing Plants for Agricultural Robots in the Wild}},
    booktitle = iros,
    year      = 2024,
    codeurl   = {https://github.com/PRBonn/spatio-temporal-mapping.git},
    videourl  = {https://youtu.be/bnWZWd5DHTg},
    }

  • E. A. Marks, J. Bömer, F. Magistri, A. Sah, J. Behley, and C. Stachniss, “BonnBeetClouds3D: A Dataset Towards Point Cloud-Based Organ-Level Phenotyping of Sugar Beet Plants Under Real Field Conditions,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{marks2024iros,
    author    = {E.A. Marks and J. B\"omer and F. Magistri and A. Sah and J. Behley and C. Stachniss},
    title     = {{BonnBeetClouds3D: A Dataset Towards Point Cloud-Based Organ-Level Phenotyping of Sugar Beet Plants Under Real Field Conditions}},
    booktitle = iros,
    year      = 2024,
    }

  • H. Lim, S. Jang, B. Mersch, J. Behley, H. Myung, and C. Stachniss, “HeLiMOS: A Dataset for Moving Object Segmentation in 3D Point Clouds From Heterogeneous LiDAR Sensors,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{lim2024iros,
    author    = {H. Lim and S. Jang and B. Mersch and J. Behley and H. Myung and C. Stachniss},
    title     = {{HeLiMOS: A Dataset for Moving Object Segmentation in 3D Point Clouds From Heterogeneous LiDAR Sensors}},
    booktitle = iros,
    year      = 2024,
    }

  • R. Schirmer, N. Vaskevicius, P. Biber, and C. Stachniss, “Fast Global Point Cloud Registration using Semantic NDT,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{schirmer2024iros,
    author    = {R. Schirmer and N. Vaskevicius and P. Biber and C. Stachniss},
    title     = {{Fast Global Point Cloud Registration using Semantic NDT}},
    booktitle = iros,
    year      = 2024,
    }

  • L. Jin, H. Kuang, Y. Pan, C. Stachniss, and M. Popović, “STAIR: Semantic-Targeted Active Implicit Reconstruction,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF] [Code]
    @inproceedings{jin2024iros,
    author    = {L. Jin and H. Kuang and Y. Pan and C. Stachniss and M. Popovi\'c},
    title     = {{STAIR: Semantic-Targeted Active Implicit Reconstruction}},
    booktitle = iros,
    year      = 2024,
    codeurl   = {https://github.com/dmar-bonn/stair},
    }

  • S. Pan, L. Jin, X. Huang, C. Stachniss, M. Popović, and M. Bennewitz, “Exploiting Priors from 3D Diffusion Models for RGB-Based One-Shot View Planning,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2024.
    [BibTeX] [PDF]
    @inproceedings{pan2024iros,
    author    = {S. Pan and L. Jin and X. Huang and C. Stachniss and M. Popovi\'c and M. Bennewitz},
    title     = {{Exploiting Priors from 3D Diffusion Models for RGB-Based One-Shot View Planning}},
    booktitle = iros,
    year      = 2024,
    }

  • J. Rückin, F. Magistri, C. Stachniss, and M. Popović, “Active Learning of Robot Vision Using Adaptive Path Planning,” in Proc. of the IROS Workshop on Label Efficient Learning Paradigms for Autonomy at Scale, 2024.
    [BibTeX] [PDF]
    @inproceedings{rueckin2024irosws,
    author    = {J. R\"uckin and F. Magistri and C. Stachniss and M. Popovi\'c},
    title     = {{Active Learning of Robot Vision Using Adaptive Path Planning}},
    booktitle = {Proc.~of the IROS Workshop on Label Efficient Learning Paradigms for Autonomy at Scale},
    year      = 2024,
    url       = {https://arxiv.org/pdf/2410.10684},
    }

  • A. Narenthiran Sivakumar, M. Magistri, M. Valverde Gasparino, J. Behley, C. Stachniss, and G. Chowdhary, “AdaCropFollow: Self-Supervised Online Adaptation for Visual Under-Canopy Navigation,” in Proc. of the IROS 2024 Workshop on AI and Robotics For Future Farming, 2024.
    [BibTeX] [PDF]
    @inproceedings{narenthiran-sivakumar2024irosws,
    author    = {Narenthiran Sivakumar, A. and Magistri, M. and Valverde Gasparino, M. and Behley, J. and Stachniss, C. and Chowdhary, G.},
    title     = {{AdaCropFollow: Self-Supervised Online Adaptation for Visual Under-Canopy Navigation}},
    booktitle = {Proc.~of the IROS 2024 Workshop on AI and Robotics For Future Farming},
    year      = 2024,
    url       = {https://arxiv.org/pdf/2410.12411},
    }

  • M. Popović, J. Ott, J. Rückin, and M. J. Kochenderfer, “Learning-based methods for adaptive informative path planning,” Journal on Robotics and Autonomous Systems (RAS), vol. 179, p. 104727, 2024.
    [BibTeX] [PDF] [Code]
    @article{popovic2024jras,
    author  = {Popovi{\'c}, M. and Ott, J. and R{\"u}ckin, J. and Kochenderfer, M.J.},
    title   = {{Learning-based methods for adaptive informative path planning}},
    journal = jras,
    volume  = {179},
    pages   = {104727},
    year    = {2024},
    codeurl = {https://dmar-bonn.github.io/aipp-survey},
    }

  • J. Bömer, F. Esser, E. A. Marks, R. A. Rosu, S. Behnke, L. Klingbeil, H. Kuhlmann, C. Stachniss, A. -K. Mahlein, and S. Paulus, “A 3D Printed Plant Model for Accurate and Reliable 3D Plant Phenotyping,” GigaScience, vol. 13, p. giae035, 2024. doi:10.1093/gigascience/giae035
    [BibTeX] [PDF]
    @article{boemer2024giga,
    author  = {J. B\"omer and F. Esser and E.A. Marks and R.A. Rosu and S. Behnke and L. Klingbeil and H. Kuhlmann and C. Stachniss and A.-K. Mahlein and S. Paulus},
    title   = {{A 3D Printed Plant Model for Accurate and Reliable 3D Plant Phenotyping}},
    journal = giga,
    volume  = {13},
    pages   = {giae035},
    issn    = {2047-217X},
    year    = 2024,
    doi     = {10.1093/gigascience/giae035},
    url     = {https://academic.oup.com/gigascience/article-pdf/doi/10.1093/gigascience/giae035/58270533/giae035.pdf},
    }

  • W. Förstner, Collected Notes, Institute for Geodesy and Geoinformation, StachnissLab, 2024.
    [BibTeX] [PDF]
    @book{foerstner2024collected,
    author    = {F{\"o}rstner, Wolfgang},
    title     = {Collected Notes},
    publisher = {Institute for Geodesy and Geoinformation, StachnissLab},
    year      = {2024},
    url       = {https://www.ipb.uni-bonn.de/html/staff/WolfgangFoerstner/collectednotes_v2/main-Lecturenotes.pdf},
    }

  • W. Förstner, “Cinderella Animations,” , 2024.
    [BibTeX] [PDF] [Code]
    @techreport{foerstner2024cinderella,
    author      = {F{\"o}rstner, Wolfgang},
    title       = {{Cinderella Animations}},
    institution = {Institute for Geodesy and Geoinformation, StachnissLab},
    year        = {2024},
    url         = {https://www.ipb.uni-bonn.de/html/staff/WolfgangFoerstner/collectednotes_v2/Cinderella-Animations.pdf},
    codeurl     = {https://github.com/PRBonn/cinderella-geometric-animations},
    }

  • H. Storm, S. J. Seidel, L. Klingbeil, F. Ewert, H. Vereecken, W. Amelung, S. Behnke, M. Bennewitz, J. Börner, T. Döring, J. Gall, A. -K. Mahlein, C. McCool, U. Rascher, S. Wrobel, A. Schnepf, C. Stachniss, and H. Kuhlmann, “Research Priorities to Leverage Smart Digital Technologies for Sustainable Crop Production,” European Journal of Agronomy, vol. 156, p. 127178, 2024. doi:https://doi.org/10.1016/j.eja.2024.127178
    [BibTeX] [PDF] [Video]
    @article{storm2024eja,
    author   = {H. Storm and S.J. Seidel and L. Klingbeil and F. Ewert and H. Vereecken and W. Amelung and S. Behnke and M. Bennewitz and J. B\"orner and T. D\"oring and J. Gall and A.-K. Mahlein and C. McCool and U. Rascher and S. Wrobel and A. Schnepf and C. Stachniss and H. Kuhlmann},
    title    = {{Research Priorities to Leverage Smart Digital Technologies for Sustainable Crop Production}},
    journal  = {European Journal of Agronomy},
    volume   = {156},
    pages    = {127178},
    year     = {2024},
    issn     = {1161-0301},
    doi      = {10.1016/j.eja.2024.127178},
    url      = {https://www.sciencedirect.com/science/article/pii/S1161030124000996},
    videourl = {https://youtu.be/uQpH6v9cY68?si=X3y36rBr1tEcso1r},
    }

  • J. Hertzberg, B. Kisliuk, J. C. Krause, and C. Stachniss, “Interview: Cyrill Stachniss’ View on AI in Agriculture,” German Journal of Artificial Intelligence (KI), 2024. doi:10.1007/s13218-023-00831-8
    [BibTeX] [PDF]
    @article{hertzberg2024ki,
    author  = {J. Hertzberg and B. Kisliuk and J.C. Krause and C. Stachniss},
    title   = {{Interview: Cyrill Stachniss' View on AI in Agriculture}},
    journal = {German Journal of Artificial Intelligence (KI)},
    year    = {2024},
    doi     = {10.1007/s13218-023-00831-8},
    url     = {https://link.springer.com/article/10.1007/s13218-023-00831-8},
    }

  • M. Sodano, F. Magistri, L. Nunes, J. Behley, and C. Stachniss, “Open-World Semantic Segmentation Including Class Similarity,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{sodano2024cvpr,
    author    = {M. Sodano and F. Magistri and L. Nunes and J. Behley and C. Stachniss},
    title     = {{Open-World Semantic Segmentation Including Class Similarity}},
    booktitle = cvpr,
    year      = 2024,
    codeurl   = {https://github.com/PRBonn/ContMAV},
    videourl  = {https://youtu.be/ei2cbyPQgag?si=_KabYyfjzzJZi1Zy},
    }

  • L. Nunes, R. Marcuzzi, B. Mersch, J. Behley, and C. Stachniss, “Scaling Diffusion Models to Real-World 3D LiDAR Scene Completion,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{nunes2024cvpr,
    author    = {L. Nunes and R. Marcuzzi and B. Mersch and J. Behley and C. Stachniss},
    title     = {{Scaling Diffusion Models to Real-World 3D LiDAR Scene Completion}},
    booktitle = cvpr,
    year      = 2024,
    codeurl   = {https://github.com/PRBonn/LiDiff},
    videourl  = {https://youtu.be/XWu8svlMKUo},
    }

  • X. Zhong, Y. Pan, C. Stachniss, and J. Behley, “3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{zhong2024cvpr,
    author    = {X. Zhong and Y. Pan and C. Stachniss and J. Behley},
    title     = {{3D LiDAR Mapping in Dynamic Environments using a 4D Implicit Neural Representation}},
    booktitle = cvpr,
    year      = 2024,
    codeurl   = {https://github.com/PRBonn/4dNDF},
    videourl  = {https://youtu.be/pRNKRcTkxjs},
    }

  • H. Yin, X. Xu, S. Lu, X. Chen, R. Xiong, S. Shen, C. Stachniss, and Y. Wang, “A Survey on Global LiDAR Localization: Challenges, Advances and Open Problems,” Intl. Journal of Computer Vision (IJCV), 2024. doi:10.1007/s11263-024-02019-5
    [BibTeX] [PDF]
    @article{yin2024ijcv,
    author  = {H. Yin and X. Xu and S. Lu and X. Chen and R. Xiong and S. Shen and C. Stachniss and Y. Wang},
    title   = {{A Survey on Global LiDAR Localization: Challenges, Advances and Open Problems}},
    journal = {Intl. Journal of Computer Vision (IJCV)},
    year    = 2024,
    doi     = {10.1007/s11263-024-02019-5},
    url     = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/yin2024ijcv-preprint.pdf},
    }

  • S. Pan, L. Jin, X. Huang, C. Stachniss, M. Popovic, and M. Bennewitz, “Exploiting Priors from 3D Diffusion Models for RGB-Based One-Shot View Planning,” in Proc. of the ICRA Workshop On Neural Fields In Robotics (RoboNerF), 2024.
    [BibTeX]
    @inproceedings{pan2024icraws,
    author    = {S. Pan and L. Jin and X. Huang and C. Stachniss and M. Popovic and M. Bennewitz},
    title     = {{Exploiting Priors from 3D Diffusion Models for {RGB}-Based One-Shot View Planning}},
    booktitle = {Proc. of the ICRA Workshop On Neural Fields In Robotics (RoboNerF)},
    year      = {2024},
    }

  • I. Hroob, B. Mersch, C. Stachniss, and M. Hanheide, “Generalizable Stable Points Segmentation for 3D LiDAR Scan-to-Map Long-Term Localization,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 4, pp. 3546-3553, 2024. doi:10.1109/LRA.2024.3368236
    [BibTeX] [PDF] [Code] [Video]
    @article{hroob2024ral,
    author   = {I. Hroob and B. Mersch and C. Stachniss and M. Hanheide},
    title    = {{Generalizable Stable Points Segmentation for 3D LiDAR Scan-to-Map Long-Term Localization}},
    journal  = ral,
    volume   = {9},
    number   = {4},
    pages    = {3546--3553},
    year     = 2024,
    doi      = {10.1109/LRA.2024.3368236},
    videourl = {https://youtu.be/aRLStFQEXbc},
    codeurl  = {https://github.com/ibrahimhroob/SPS},
    }

  • M. Zeller, D. Casado Herraez, J. Behley, M. Heidingsfeld, and C. Stachniss, “Radar Tracker: Moving Instance Tracking in Sparse and Noisy Radar Point Clouds,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Video]
    @inproceedings{zeller2024icra,
    author    = {M. Zeller and Casado Herraez, Daniel and J. Behley and M. Heidingsfeld and C. Stachniss},
    title     = {{Radar Tracker: Moving Instance Tracking in Sparse and Noisy Radar Point Clouds}},
    booktitle = icra,
    year      = 2024,
    videourl  = {https://youtu.be/PixfkN8cMig},
    }

  • D. Casado Herraez, M. Zeller, L. Chang, I. Vizzo, M. Heidingsfeld, and C. Stachniss, “Radar-Only Odometry and Mapping for Autonomous Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Video]
    @inproceedings{casado-herraez2024icra,
    author    = {Casado Herraez, Daniel and M. Zeller and Chang, Le and I. Vizzo and M. Heidingsfeld and C. Stachniss},
    title     = {{Radar-Only Odometry and Mapping for Autonomous Vehicles}},
    booktitle = icra,
    year      = 2024,
    videourl  = {https://youtu.be/_xWDXyyKEok},
    }

  • M. V. R. Malladi, T. Guadagnino, L. Lobefaro, M. Mattamala, H. Griess, J. Schweier, N. Chebrolu, M. Fallon, J. Behley, and C. Stachniss, “Tree Instance Segmentation and Traits Estimation for Forestry Environments Exploiting LiDAR Data ,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{malladi2024icra,
    author    = {M.V.R. Malladi and T. Guadagnino and L. Lobefaro and M. Mattamala and H. Griess and J. Schweier and N. Chebrolu and M. Fallon and J. Behley and C. Stachniss},
    title     = {{Tree Instance Segmentation and Traits Estimation for Forestry Environments Exploiting LiDAR Data}},
    booktitle = icra,
    year      = 2024,
    videourl  = {https://youtu.be/14uuCxmfGco},
    codeurl   = {https://github.com/PRBonn/forest_inventory_pipeline},
    }

  • F. Magistri, R. Marcuzzi, E. A. Marks, M. Sodano, J. Behley, and C. Stachniss, “Efficient and Accurate Transformer-Based 3D Shape Completion and Reconstruction of Fruits for Agricultural Robots,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{magistri2024icra,
    author    = {F. Magistri and R. Marcuzzi and E.A. Marks and M. Sodano and J. Behley and C. Stachniss},
    title     = {{Efficient and Accurate Transformer-Based 3D Shape Completion and Reconstruction of Fruits for Agricultural Robots}},
    booktitle = icra,
    year      = 2024,
    videourl  = {https://youtu.be/U1xxnUGrVL4},
    codeurl   = {https://github.com/PRBonn/TCoRe},
    }

  • S. Gupta, T. Guadagnino, B. Mersch, I. Vizzo, and C. Stachniss, “Effectively Detecting Loop Closures using Point Cloud Density Maps,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{gupta2024icra,
    author    = {S. Gupta and T. Guadagnino and B. Mersch and I. Vizzo and C. Stachniss},
    title     = {{Effectively Detecting Loop Closures using Point Cloud Density Maps}},
    booktitle = icra,
    year      = 2024,
    codeurl   = {https://github.com/PRBonn/MapClosures},
    videourl  = {https://youtu.be/BpwR_aLXrNo},
    }

  • Y. Wu, T. Guadagnino, L. Wiesmann, L. Klingbeil, C. Stachniss, and H. Kuhlmann, “LIO-EKF: High Frequency LiDAR-Inertial Odometry using Extended Kalman Filters,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2024.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{wu2024icra,
    author    = {Y. Wu and T. Guadagnino and L. Wiesmann and L. Klingbeil and C. Stachniss and H. Kuhlmann},
    title     = {{LIO-EKF: High Frequency LiDAR-Inertial Odometry using Extended Kalman Filters}},
    booktitle = icra,
    year      = 2024,
    codeurl   = {https://github.com/YibinWu/LIO-EKF},
    videourl  = {https://youtu.be/MoJTqEYl1ME},
    }

  • M. Zeller, V. S. Sandhu, B. Mersch, J. Behley, M. Heidingsfeld, and C. Stachniss, “Radar Instance Transformer: Reliable Moving Instance Segmentation in Sparse Radar Point Clouds,” IEEE Trans. on Robotics (TRO), vol. 40, pp. 2357-2372, 2024. doi:10.1109/TRO.2023.3338972
    [BibTeX] [PDF] [Video]
    @article{zeller2024tro,
    author   = {M. Zeller and Sandhu, V.S. and B. Mersch and J. Behley and M. Heidingsfeld and C. Stachniss},
    title    = {{Radar Instance Transformer: Reliable Moving Instance Segmentation in Sparse Radar Point Clouds}},
    journal  = tro,
    year     = {2024},
    volume   = {40},
    pages    = {2357--2372},
    doi      = {10.1109/TRO.2023.3338972},
    videourl = {https://www.youtube.com/watch?v=v-iXbJEcqPM},
    }

  • J. Rückin, F. Magistri, C. Stachniss, and M. Popović, “Semi-Supervised Active Learning for Semantic Segmentation in Unknown Environments Using Informative Path Planning,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 3, pp. 2662-2669, 2024. doi:10.1109/LRA.2024.3359970
    [BibTeX] [PDF] [Code]
    @article{rueckin2024ral,
    author  = {J. R\"uckin and F. Magistri and C. Stachniss and M. Popovi\'c},
    title   = {{Semi-Supervised Active Learning for Semantic Segmentation in Unknown Environments Using Informative Path Planning}},
    journal = ral,
    year    = {2024},
    volume  = {9},
    number  = {3},
    pages   = {2662--2669},
    issn    = {2377-3766},
    doi     = {10.1109/LRA.2024.3359970},
    codeurl = {https://github.com/dmar-bonn/ipp-ssl},
    }

  • J. Weyler, T. Läbe, J. Behley, and C. Stachniss, “Panoptic Segmentation with Partial Annotations for Agricultural Robots,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 2, pp. 1660-1667, 2024. doi:10.1109/LRA.2023.3346760
    [BibTeX] [PDF] [Code]
    @article{weyler2024ral,
    author  = {J. Weyler and T. L\"abe and J. Behley and C. Stachniss},
    title   = {{Panoptic Segmentation with Partial Annotations for Agricultural Robots}},
    journal = ral,
    year    = {2024},
    volume  = {9},
    number  = {2},
    pages   = {1660--1667},
    issn    = {2377-3766},
    doi     = {10.1109/LRA.2023.3346760},
    codeurl = {https://github.com/PRBonn/PSPA},
    }

  • C. Smitt, M. A. Halstead, P. Zimmer, T. Läbe, E. Guclu, C. Stachniss, and C. S. McCool, “PAg-NeRF: Towards fast and efficient end-to-end panoptic 3D representations for agricultural robotics,” IEEE Robotics and Automation Letters (RA-L), vol. 9, iss. 1, pp. 907-914, 2024. doi:10.1109/LRA.2023.3338515
    [BibTeX] [PDF] [Code]
    @article{smitt2024ral-pagn,
    author  = {C. Smitt and M.A. Halstead and P. Zimmer and T. L\"abe and E. Guclu and C. Stachniss and C.S. McCool},
    title   = {{PAg-NeRF: Towards fast and efficient end-to-end panoptic 3D representations for agricultural robotics}},
    journal = ral,
    year    = {2024},
    volume  = {9},
    number  = {1},
    pages   = {907--914},
    issn    = {2377-3766},
    doi     = {10.1109/LRA.2023.3338515},
    codeurl = {https://github.com/Agricultural-Robotics-Bonn/pagnerf},
    }

2023

  • C. Gomez, A. C. Hernandez, R. Barber, and C. Stachniss, “Localization Exploiting Semantic and Metric Information in Non-static Indoor Environments,” Journal of Intelligent & Robotic Systems (JINT), vol. 109, iss. 86, 2023. doi:https://doi.org/10.1007/s10846-023-02021-y
    [BibTeX] [PDF]
    @article{gomez2023jint,
    author  = {C. Gomez and A.C. Hernandez and R. Barber and C. Stachniss},
    title   = {{Localization Exploiting Semantic and Metric Information in Non-static Indoor Environments}},
    journal = jint,
    year    = {2023},
    volume  = {109},
    number  = {86},
    doi     = {10.1007/s10846-023-02021-y},
    }

  • R. Marcuzzi, L. Nunes, L. Wiesmann, E. Marks, J. Behley, and C. Stachniss, “Mask4D: End-to-End Mask-Based 4D Panoptic Segmentation for LiDAR Sequences,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 11, pp. 7487-7494, 2023. doi:10.1109/LRA.2023.3320020
    [BibTeX] [PDF] [Code] [Video]
    @article{marcuzzi2023ral-meem,
    author   = {R. Marcuzzi and L. Nunes and L. Wiesmann and E. Marks and J. Behley and C. Stachniss},
    title    = {{Mask4D: End-to-End Mask-Based 4D Panoptic Segmentation for LiDAR Sequences}},
    journal  = ral,
    year     = {2023},
    volume   = {8},
    number   = {11},
    pages    = {7487--7494},
    issn     = {2377-3766},
    doi      = {10.1109/LRA.2023.3320020},
    codeurl  = {https://github.com/PRBonn/Mask4D},
    videourl = {https://youtu.be/4WqK_gZlpfA},
    }

  • G. Roggiolani, F. Magistri, T. Guadagnino, J. Behley, and C. Stachniss, “Unsupervised Pre-Training for 3D Leaf Instance Segmentation,” IEEE Robotics and Automation Letters (RA-L), vol. 8, pp. 7448-7455, 2023. doi:10.1109/LRA.2023.3320018
    [BibTeX] [PDF] [Code] [Video]
    @article{roggiolani2023ral,
      author   = {G. Roggiolani and F. Magistri and T. Guadagnino and J. Behley and C. Stachniss},
      title    = {{Unsupervised Pre-Training for 3D Leaf Instance Segmentation}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {11},
      pages    = {7448--7455},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2023.3320018},
      codeurl  = {https://github.com/PRBonn/Unsupervised-Pre-Training-for-3D-Leaf-Instance-Segmentation},
      videourl = {https://youtu.be/PbYVPPwVeKg},
    }

  • J. Rückin, F. Magistri, C. Stachniss, and M. Popovic, “An Informative Path Planning Framework for Active Learning in UAV-based Semantic Mapping,” IEEE Transactions on Robotics (TRO), vol. 39, iss. 6, pp. 4279-4296, 2023. doi:10.1109/TRO.2023.3313811
    [BibTeX] [PDF] [Code]
    @article{rueckin2023tro,
      author  = {J. R{\"u}ckin and F. Magistri and C. Stachniss and M. Popovic},
      title   = {{An Informative Path Planning Framework for Active Learning in UAV-based Semantic Mapping}},
      journal = tro,
      year    = {2023},
      volume  = {39},
      number  = {6},
      pages   = {4279--4296},
      doi     = {10.1109/TRO.2023.3313811},
      codeurl = {https://github.com/dmar-bonn/ipp-al-framework},
    }

  • F. Magistri, J. Weyler, D. Gogoll, P. Lottes, J. Behley, N. Petrinic, and C. Stachniss, “From one Field to Another – Unsupervised Domain Adaptation for Semantic Segmentation in Agricultural Robotics,” Computers and Electronics in Agriculture, vol. 212, p. 108114, 2023. doi:https://doi.org/10.1016/j.compag.2023.108114
    [BibTeX] [PDF]
    @article{magistri2023cea,
      author  = {F. Magistri and J. Weyler and D. Gogoll and P. Lottes and J. Behley and N. Petrinic and C. Stachniss},
      title   = {{From one Field to Another -- Unsupervised Domain Adaptation for Semantic Segmentation in Agricultural Robotics}},
      journal = cea,
      year    = {2023},
      volume  = {212},
      pages   = {108114},
      doi     = {10.1016/j.compag.2023.108114},
    }

  • I. Vizzo, B. Mersch, L. Nunes, L. Wiesmann, T. Guadagnino, and C. Stachniss, “Toward Reproducible Version-Controlled Perception Platforms: Embracing Simplicity in Autonomous Vehicle Dataset Acquisition,” in Proc. of the Intl. Conf. on Intelligent Transportation Systems Workshops, 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{vizzo2023itcsws,
      author    = {I. Vizzo and B. Mersch and L. Nunes and L. Wiesmann and T. Guadagnino and C. Stachniss},
      title     = {{Toward Reproducible Version-Controlled Perception Platforms: Embracing Simplicity in Autonomous Vehicle Dataset Acquisition}},
      booktitle = {Proc. of the Intl. Conf. on Intelligent Transportation Systems Workshops},
      year      = {2023},
      codeurl   = {https://github.com/ipb-car/meta-workspace},
      note      = {accepted},
    }

  • B. Mersch, T. Guadagnino, X. Chen, I. Vizzo, J. Behley, and C. Stachniss, “Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, pp. 5180-5187, 2023. doi:10.1109/LRA.2023.3292583
    [BibTeX] [PDF] [Code] [Video]
    @article{mersch2023ral,
      author   = {B. Mersch and T. Guadagnino and X. Chen and I. Vizzo and J. Behley and C. Stachniss},
      title    = {{Building Volumetric Beliefs for Dynamic Environments Exploiting Map-Based Moving Object Segmentation}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {8},
      pages    = {5180--5187},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2023.3292583},
      codeurl  = {https://github.com/PRBonn/MapMOS},
      videourl = {https://youtu.be/aeXhvkwtDbI},
    }

  • Y. L. Chong, J. Weyler, P. Lottes, J. Behley, and C. Stachniss, “Unsupervised Generation of Labeled Training Images for Crop-Weed Segmentation in New Fields and on Different Robotic Platforms,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, p. 5259–5266, 2023. doi:10.1109/LRA.2023.3293356
    [BibTeX] [PDF] [Code] [Video]
    @article{chong2023ral,
      author   = {Y.L. Chong and J. Weyler and P. Lottes and J. Behley and C. Stachniss},
      title    = {{Unsupervised Generation of Labeled Training Images for Crop-Weed Segmentation in New Fields and on Different Robotic Platforms}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {8},
      pages    = {5259--5266},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2023.3293356},
      codeurl  = {https://github.com/PRBonn/StyleGenForLabels},
      videourl = {https://youtu.be/SpvrR9sgf2k},
    }

  • L. Lobefaro, M. V. R. Malladi, O. Vysotska, T. Guadagnino, and C. Stachniss, “Estimating 4D Data Associations Towards Spatial-Temporal Mapping of Growing Plants for Agricultural Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{lobefaro2023iros,
      author    = {L. Lobefaro and M.V.R. Malladi and O. Vysotska and T. Guadagnino and C. Stachniss},
      title     = {{Estimating 4D Data Associations Towards Spatial-Temporal Mapping of Growing Plants for Agricultural Robots}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/plants_temporal_matcher},
      videourl  = {https://youtu.be/HpJPIzmXoag},
    }

  • Y. Pan, F. Magistri, T. Läbe, E. Marks, C. Smitt, C. S. McCool, J. Behley, and C. Stachniss, “Panoptic Mapping with Fruit Completion and Pose Estimation for Horticultural Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{pan2023iros,
      author    = {Y. Pan and F. Magistri and T. L\"abe and E. Marks and C. Smitt and C.S. McCool and J. Behley and C. Stachniss},
      title     = {{Panoptic Mapping with Fruit Completion and Pose Estimation for Horticultural Robots}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/HortiMapping},
      videourl  = {https://youtu.be/fSyHBhskjqA},
    }

  • Y. Goel, N. Vaskevicius, L. Palmieri, N. Chebrolu, K. O. Arras, and C. Stachniss, “Semantically Informed MPC for Context-Aware Robot Exploration,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF]
    @inproceedings{goel2023iros,
      author    = {Y. Goel and N. Vaskevicius and L. Palmieri and N. Chebrolu and K.O. Arras and C. Stachniss},
      title     = {{Semantically Informed MPC for Context-Aware Robot Exploration}},
      booktitle = iros,
      year      = {2023},
    }

  • N. Zimmerman, M. Sodano, E. Marks, J. Behley, and C. Stachniss, “Constructing Metric-Semantic Maps using Floor Plan Priors for Long-Term Indoor Localization,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{zimmerman2023iros,
      author    = {N. Zimmerman and M. Sodano and E. Marks and J. Behley and C. Stachniss},
      title     = {{Constructing Metric-Semantic Maps using Floor Plan Priors for Long-Term Indoor Localization}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/SIMP},
      videourl  = {https://youtu.be/9ZGd5lJbG4s},
    }

  • L. Jin, X. Chen, J. Rückin, and M. Popović, “NeU-NBV: Next Best View Planning Using Uncertainty Estimation in Image-Based Neural Rendering,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{jin2023iros,
      author    = {Jin, Liren and Chen, Xieyuanli and R{\"u}ckin, Julius and Popovi{\'c}, Marija},
      title     = {{NeU-NBV: Next Best View Planning Using Uncertainty Estimation in Image-Based Neural Rendering}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/dmar-bonn/neu-nbv},
    }

  • J. Westheider, J. Rückin, and M. Popović, “Multi-UAV Adaptive Path Planning Using Deep Reinforcement Learning,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{westheider2023iros,
      author    = {Westheider, Jonas and R{\"u}ckin, Julius and Popovi{\'c}, Marija},
      title     = {{Multi-UAV Adaptive Path Planning Using Deep Reinforcement Learning}},
      booktitle = iros,
      year      = {2023},
      codeurl   = {https://github.com/dmar-bonn/ipp-marl},
    }

  • T. Zaenker, J. Rückin, R. Menon, M. Popović, and M. Bennewitz, “Graph-based view motion planning for fruit detection,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2023.
    [BibTeX] [PDF]
    @inproceedings{zaenker2023iros,
      author    = {Zaenker, Tobias and R{\"u}ckin, Julius and Menon, Rohit and Popovi{\'c}, Marija and Bennewitz, Maren},
      title     = {{Graph-based view motion planning for fruit detection}},
      booktitle = iros,
      year      = {2023},
    }

  • J. Weyler, F. Magistri, E. Marks, Y. L. Chong, M. Sodano, G. Roggiolani, N. Chebrolu, C. Stachniss, and J. Behley, “PhenoBench – A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain,” arXiv preprint, vol. arXiv:2306.04557, 2023.
    [BibTeX] [PDF] [Code]
    @article{weyler2023arxiv,
      author  = {Jan Weyler and Federico Magistri and Elias Marks and Yue Linn Chong and Matteo Sodano and Gianmarco Roggiolani and Nived Chebrolu and Cyrill Stachniss and Jens Behley},
      title   = {{PhenoBench --- A Large Dataset and Benchmarks for Semantic Image Interpretation in the Agricultural Domain}},
      journal = {arXiv preprint},
      year    = {2023},
      volume  = {arXiv:2306.04557},
      codeurl = {https://github.com/PRBonn/phenobench},
    }

  • W. Förstner, “Friedrich Ackermann’s scientific research program,” Geo-spatial Information Science, pp. 1-10, 2023. doi:10.1080/10095020.2023.2231273
    [BibTeX] [PDF]
    @article{foerstner23:friedrich,
      author    = {F\"orstner, Wolfgang},
      title     = {{Friedrich Ackermann's scientific research program}},
      journal   = {Geo-spatial Information Science},
      year      = {2023},
      volume    = {0},
      number    = {0},
      pages     = {1--10},
      doi       = {10.1080/10095020.2023.2231273},
      publisher = {Taylor \& Francis},
      url       = {https://www.tandfonline.com/doi/pdf/10.1080/10095020.2023.2231273?download=true},
    }

  • D. Barath, D. Mishkin, M. Polic, W. Förstner, and J. Matas, “A Large-Scale Homography Benchmark,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023, pp. 21360-21370.
    [BibTeX] [PDF]
    @inproceedings{Barath2023cvpr,
      author    = {Barath, Daniel and Mishkin, Dmytro and Polic, Michal and F\"orstner, Wolfgang and Matas, Jiri},
      title     = {A Large-Scale Homography Benchmark},
      booktitle = cvpr,
      year      = {2023},
      pages     = {21360--21370},
      url       = {https://openaccess.thecvf.com/content/CVPR2023/papers/Barath_A_Large-Scale_Homography_Benchmark_CVPR_2023_paper.pdf},
    }

  • L. Wiesmann, T. Guadagnino, I. Vizzo, N. Zimmerman, Y. Pan, H. Kuang, J. Behley, and C. Stachniss, “LocNDF: Neural Distance Field Mapping for Robot Localization,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, p. 4999–5006, 2023. doi:10.1109/LRA.2023.3291274
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2023ral-icra,
      author   = {L. Wiesmann and T. Guadagnino and I. Vizzo and N. Zimmerman and Y. Pan and H. Kuang and J. Behley and C. Stachniss},
      title    = {{LocNDF: Neural Distance Field Mapping for Robot Localization}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {8},
      pages    = {4999--5006},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2023.3291274},
      url      = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wiesmann2023ral-icra.pdf},
      codeurl  = {https://github.com/PRBonn/LocNDF},
      videourl = {https://youtu.be/-0idH21BpMI},
    }

  • E. Marks, M. Sodano, F. Magistri, L. Wiesmann, D. Desai, R. Marcuzzi, J. Behley, and C. Stachniss, “High Precision Leaf Instance Segmentation in Point Clouds Obtained Under Real Field Conditions,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 8, pp. 4791-4798, 2023. doi:10.1109/LRA.2023.3288383
    [BibTeX] [PDF] [Code] [Video]
    @article{marks2023ral,
      author   = {E. Marks and M. Sodano and F. Magistri and L. Wiesmann and D. Desai and R. Marcuzzi and J. Behley and C. Stachniss},
      title    = {{High Precision Leaf Instance Segmentation in Point Clouds Obtained Under Real Field Conditions}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {8},
      pages    = {4791--4798},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2023.3288383},
      codeurl  = {https://github.com/PRBonn/plant_pcd_segmenter},
      videourl = {https://youtu.be/dvA1SvQ4iEY},
    }

  • L. Peters, V. Rubies Royo, C. Tomlin, L. Ferranti, J. Alonso-Mora, C. Stachniss, and D. Fridovich-Keil, “Online and Offline Learning of Player Objectives from Partial Observations in Dynamic Games,” Intl. Journal of Robotics Research (IJRR), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @article{peters2023ijrr,
      author   = {Peters, L. and Rubies Royo, V. and Tomlin, C. and Ferranti, L. and Alonso-Mora, J. and Stachniss, C. and Fridovich-Keil, D.},
      title    = {{Online and Offline Learning of Player Objectives from Partial Observations in Dynamic Games}},
      journal  = ijrr,
      year     = {2023},
      url      = {https://journals.sagepub.com/doi/reader/10.1177/02783649231182453},
      codeurl  = {https://github.com/PRBonn/PartiallyObservedInverseGames.jl},
      videourl = {https://www.youtube.com/watch?v=BogCsYQX9Pc},
    }

  • H. Lim, L. Nunes, B. Mersch, X. Chen, J. Behley, H. Myung, and C. Stachniss, “ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes,” in Proc. of Robotics: Science and Systems (RSS), 2023.
    [BibTeX] [PDF]
    @inproceedings{lim2023rss,
      author    = {H. Lim and L. Nunes and B. Mersch and X. Chen and J. Behley and H. Myung and C. Stachniss},
      title     = {{ERASOR2: Instance-Aware Robust 3D Mapping of the Static World in Dynamic Scenes}},
      booktitle = rss,
      year      = {2023},
    }

  • J. Weyler, T. Läbe, F. Magistri, J. Behley, and C. Stachniss, “Towards Domain Generalization in Crop and Weed Segmentation for Precision Farming Robots,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 6, pp. 3310-3317, 2023. doi:10.1109/LRA.2023.3262417
    [BibTeX] [PDF] [Code]
    @article{weyler2023ral,
      author  = {J. Weyler and T. L\"abe and F. Magistri and J. Behley and C. Stachniss},
      title   = {{Towards Domain Generalization in Crop and Weed Segmentation for Precision Farming Robots}},
      journal = ral,
      year    = {2023},
      volume  = {8},
      number  = {6},
      pages   = {3310--3317},
      issn    = {2377-3766},
      doi     = {10.1109/LRA.2023.3262417},
      codeurl = {https://github.com/PRBonn/DG-CWS},
    }

  • L. Nunes, L. Wiesmann, R. Marcuzzi, X. Chen, J. Behley, and C. Stachniss, “Temporal Consistent 3D LiDAR Representation Learning for Semantic Perception in Autonomous Driving,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{nunes2023cvpr,
      author    = {L. Nunes and L. Wiesmann and R. Marcuzzi and X. Chen and J. Behley and C. Stachniss},
      title     = {{Temporal Consistent 3D LiDAR Representation Learning for Semantic Perception in Autonomous Driving}},
      booktitle = cvpr,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/TARL},
      videourl  = {https://youtu.be/0CtDbwRYLeo},
    }

  • H. Kuang, X. Chen, T. Guadagnino, N. Zimmerman, J. Behley, and C. Stachniss, “IR-MCL: Implicit Representation-Based Online Global Localization,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 3, p. 1627–1634, 2023. doi:10.1109/LRA.2023.3239318
    [BibTeX] [PDF] [Code]
    @article{kuang2023ral,
      author  = {Kuang, Haofei and Chen, Xieyuanli and Guadagnino, Tiziano and Zimmerman, Nicky and Behley, Jens and Stachniss, Cyrill},
      title   = {{IR-MCL: Implicit Representation-Based Online Global Localization}},
      journal = ral,
      year    = {2023},
      volume  = {8},
      number  = {3},
      pages   = {1627--1634},
      doi     = {10.1109/LRA.2023.3239318},
      codeurl = {https://github.com/PRBonn/ir-mcl},
    }

  • X. Zhong, Y. Pan, J. Behley, and C. Stachniss, “SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{zhong2023icra,
      author    = {Zhong, Xingguang and Pan, Yue and Behley, Jens and Stachniss, Cyrill},
      title     = {{SHINE-Mapping: Large-Scale 3D Mapping Using Sparse Hierarchical Implicit Neural Representations}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/SHINE_mapping},
      videourl  = {https://youtu.be/jRqIupJgQZE},
    }

  • M. Sodano, F. Magistri, T. Guadagnino, J. Behley, and C. Stachniss, “Robust Double-Encoder Network for RGB-D Panoptic Segmentation,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{sodano2023icra,
      author    = {Matteo Sodano and Federico Magistri and Tiziano Guadagnino and Jens Behley and Cyrill Stachniss},
      title     = {{Robust Double-Encoder Network for RGB-D Panoptic Segmentation}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/PS-res-excite},
      videourl  = {https://youtu.be/r1pabV3sQYk},
    }

  • S. Kelly, A. Riccardi, E. Marks, F. Magistri, T. Guadagnino, M. Chli, and C. Stachniss, “Target-Aware Implicit Mapping for Agricultural Crop Inspection,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Video]
    @inproceedings{kelly2023icra,
      author    = {Shane Kelly and Alessandro Riccardi and Elias Marks and Federico Magistri and Tiziano Guadagnino and Margarita Chli and Cyrill Stachniss},
      title     = {{Target-Aware Implicit Mapping for Agricultural Crop Inspection}},
      booktitle = icra,
      year      = {2023},
      videourl  = {https://youtu.be/UAIqn0QnpKg},
    }

  • A. Riccardi, S. Kelly, E. Marks, F. Magistri, T. Guadagnino, J. Behley, M. Bennewitz, and C. Stachniss, “Fruit Tracking Over Time Using High-Precision Point Clouds,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Video]
    @inproceedings{riccardi2023icra,
      author    = {Alessandro Riccardi and Shane Kelly and Elias Marks and Federico Magistri and Tiziano Guadagnino and Jens Behley and Maren Bennewitz and Cyrill Stachniss},
      title     = {{Fruit Tracking Over Time Using High-Precision Point Clouds}},
      booktitle = icra,
      year      = {2023},
      videourl  = {https://youtu.be/fBGSd0--PXY},
    }

  • G. Roggiolani, M. Sodano, F. Magistri, T. Guadagnino, J. Behley, and C. Stachniss, “Hierarchical Approach for Joint Semantic, Plant Instance, and Leaf Instance Segmentation in the Agricultural Domain,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{roggiolani2023icra-hajs,
      author    = {G. Roggiolani and M. Sodano and F. Magistri and T. Guadagnino and J. Behley and C. Stachniss},
      title     = {{Hierarchical Approach for Joint Semantic, Plant Instance, and Leaf Instance Segmentation in the Agricultural Domain}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/HAPT},
      videourl  = {https://youtu.be/miuOJjxlJic},
    }

  • G. Roggiolani, F. Magistri, T. Guadagnino, J. Weyler, G. Grisetti, C. Stachniss, and J. Behley, “On Domain-Specific Pre-Training for Effective Semantic Perception in Agricultural Robotics,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{roggiolani2023icra-odsp,
      author    = {G. Roggiolani and F. Magistri and T. Guadagnino and J. Weyler and G. Grisetti and C. Stachniss and J. Behley},
      title     = {{On Domain-Specific Pre-Training for Effective Semantic Perception in Agricultural Robotics}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/agri-pretraining},
      videourl  = {https://youtu.be/FDWY_UnfsBs},
    }

  • H. Dong, X. Chen, M. Dusmanu, V. Larsson, M. Pollefeys, and C. Stachniss, “Learning-Based Dimensionality Reduction for Computing Compact and Effective Local Feature Descriptors,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Code]
    @inproceedings{dong2023icra,
      author    = {H. Dong and X. Chen and M. Dusmanu and V. Larsson and M. Pollefeys and C. Stachniss},
      title     = {{Learning-Based Dimensionality Reduction for Computing Compact and Effective Local Feature Descriptors}},
      booktitle = icra,
      year      = {2023},
      codeurl   = {https://github.com/PRBonn/descriptor-dr},
    }

  • M. Zeller, V. S. Sandhu, B. Mersch, J. Behley, M. Heidingsfeld, and C. Stachniss, “Radar Velocity Transformer: Single-scan Moving Object Segmentation in Noisy Radar Point Clouds,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2023.
    [BibTeX] [PDF] [Video]
    @inproceedings{zeller2023icra,
      author    = {M. Zeller and V.S. Sandhu and B. Mersch and J. Behley and M. Heidingsfeld and C. Stachniss},
      title     = {{Radar Velocity Transformer: Single-scan Moving Object Segmentation in Noisy Radar Point Clouds}},
      booktitle = icra,
      year      = {2023},
      videourl  = {https://youtu.be/dTDgzWIBgpE},
    }

  • I. Vizzo, T. Guadagnino, B. Mersch, L. Wiesmann, J. Behley, and C. Stachniss, “KISS-ICP: In Defense of Point-to-Point ICP – Simple, Accurate, and Robust Registration If Done the Right Way,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 2, pp. 1-8, 2023. doi:10.1109/LRA.2023.3236571
    [BibTeX] [PDF] [Code] [Video]
    @article{vizzo2023ral,
      author   = {Vizzo, Ignacio and Guadagnino, Tiziano and Mersch, Benedikt and Wiesmann, Louis and Behley, Jens and Stachniss, Cyrill},
      title    = {{KISS-ICP: In Defense of Point-to-Point ICP -- Simple, Accurate, and Robust Registration If Done the Right Way}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {2},
      pages    = {1--8},
      doi      = {10.1109/LRA.2023.3236571},
      codeurl  = {https://github.com/PRBonn/kiss-icp},
      videourl = {https://youtu.be/h71aGiD-uxU},
    }

  • R. Marcuzzi, L. Nunes, L. Wiesmann, J. Behley, and C. Stachniss, “Mask-Based Panoptic LiDAR Segmentation for Autonomous Driving,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 2, p. 1141–1148, 2023. doi:10.1109/LRA.2023.3236568
    [BibTeX] [PDF] [Code] [Video]
    @article{marcuzzi2023ral,
      author   = {R. Marcuzzi and L. Nunes and L. Wiesmann and J. Behley and C. Stachniss},
      title    = {{Mask-Based Panoptic LiDAR Segmentation for Autonomous Driving}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {2},
      pages    = {1141--1148},
      doi      = {10.1109/LRA.2023.3236568},
      codeurl  = {https://github.com/PRBonn/MaskPLS},
      videourl = {https://youtu.be/I8G9VKpZux8},
    }

  • L. Wiesmann, L. Nunes, J. Behley, and C. Stachniss, “KPPR: Exploiting Momentum Contrast for Point Cloud-Based Place Recognition,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 2, pp. 592-599, 2023. doi:10.1109/LRA.2022.3228174
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2023ral,
      author   = {L. Wiesmann and L. Nunes and J. Behley and C. Stachniss},
      title    = {{KPPR: Exploiting Momentum Contrast for Point Cloud-Based Place Recognition}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {2},
      pages    = {592--599},
      issn     = {2377-3766},
      doi      = {10.1109/LRA.2022.3228174},
      codeurl  = {https://github.com/PRBonn/kppr},
      videourl = {https://youtu.be/bICz1sqd8Xs},
    }

  • Y. Wu, J. Kuang, X. Niu, J. Behley, L. Klingbeil, and H. Kuhlmann, “Wheel-SLAM: Simultaneous Localization and Terrain Mapping Using One Wheel-mounted IMU,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 1, p. 280–287, 2023. doi:10.1109/LRA.2022.3226071
    [BibTeX] [PDF] [Code]
    @article{wu2023ral,
      author  = {Y. Wu and J. Kuang and X. Niu and J. Behley and L. Klingbeil and H. Kuhlmann},
      title   = {{Wheel-SLAM: Simultaneous Localization and Terrain Mapping Using One Wheel-mounted IMU}},
      journal = ral,
      year    = {2023},
      volume  = {8},
      number  = {1},
      pages   = {280--287},
      doi     = {10.1109/LRA.2022.3226071},
      codeurl = {https://github.com/i2Nav-WHU/Wheel-SLAM},
    }

  • M. Zeller, J. Behley, M. Heidingsfeld, and C. Stachniss, “Gaussian Radar Transformer for Semantic Segmentation in Noisy Radar Data,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 1, p. 344–351, 2023. doi:10.1109/LRA.2022.3226030
    [BibTeX] [PDF] [Video]
    @article{zeller2023ral,
      author   = {M. Zeller and J. Behley and M. Heidingsfeld and C. Stachniss},
      title    = {{Gaussian Radar Transformer for Semantic Segmentation in Noisy Radar Data}},
      journal  = ral,
      year     = {2023},
      volume   = {8},
      number   = {1},
      pages    = {344--351},
      doi      = {10.1109/LRA.2022.3226030},
      videourl = {https://youtu.be/uNlNkYoG-tA},
    }

  • N. Zimmerman, T. Guadagnino, X. Chen, J. Behley, and C. Stachniss, “Long-Term Localization using Semantic Cues in Floor Plan Maps,” IEEE Robotics and Automation Letters (RA-L), vol. 8, iss. 1, pp. 176-183, 2023. doi:10.1109/LRA.2022.3223556
    [BibTeX] [PDF] [Code]
    @article{zimmerman2023ral,
      author  = {N. Zimmerman and T. Guadagnino and X. Chen and J. Behley and C. Stachniss},
      title   = {{Long-Term Localization using Semantic Cues in Floor Plan Maps}},
      journal = ral,
      year    = {2023},
      volume  = {8},
      number  = {1},
      pages   = {176--183},
      issn    = {2377-3766},
      doi     = {10.1109/LRA.2022.3223556},
      codeurl = {https://github.com/PRBonn/hsmcl},
    }

  • H. Müller, N. Zimmerman, T. Polonelli, M. Magno, J. Behley, C. Stachniss, and L. Benini, “Fully On-board Low-Power Localization with Multizone Time-of-Flight Sensors on Nano-UAVs,” in Proc. of Design, Automation & Test in Europe Conference & Exhibition (DATE), 2023.
    [BibTeX] [PDF]
    @inproceedings{mueller2023date,
      author    = {H. M{\"u}ller and N. Zimmerman and T. Polonelli and M. Magno and J. Behley and C. Stachniss and L. Benini},
      title     = {{Fully On-board Low-Power Localization with Multizone Time-of-Flight Sensors on Nano-UAVs}},
      booktitle = {Proc. of Design, Automation \& Test in Europe Conference \& Exhibition (DATE)},
      year      = {2023},
    }

  • M. Arora, L. Wiesmann, X. Chen, and C. Stachniss, “Static Map Generation from 3D LiDAR Point Clouds Exploiting Ground Segmentation,” Journal on Robotics and Autonomous Systems (RAS), vol. 159, p. 104287, 2023. doi:https://doi.org/10.1016/j.robot.2022.104287
    [BibTeX] [PDF] [Code]
    @article{arora2023jras,
      author  = {M. Arora and L. Wiesmann and X. Chen and C. Stachniss},
      title   = {{Static Map Generation from 3D LiDAR Point Clouds Exploiting Ground Segmentation}},
      journal = jras,
      year    = {2023},
      volume  = {159},
      pages   = {104287},
      issn    = {0921-8890},
      doi     = {10.1016/j.robot.2022.104287},
      codeurl = {https://github.com/PRBonn/dynamic-point-removal},
    }

  • F. Stache, J. Westheider, F. Magistri, C. Stachniss, and M. Popovic, “Adaptive Path Planning for UAVs for Multi-Resolution Semantic Segmentation,” Journal on Robotics and Autonomous Systems (RAS), vol. 159, p. 104288, 2023. doi:10.1016/j.robot.2022.104288
    [BibTeX] [PDF]
    @article{stache2023jras,
      author  = {F. Stache and J. Westheider and F. Magistri and C. Stachniss and M. Popovic},
      title   = {{Adaptive Path Planning for UAVs for Multi-Resolution Semantic Segmentation}},
      journal = jras,
      year    = {2023},
      volume  = {159},
      pages   = {104288},
      issn    = {0921-8890},
      doi     = {10.1016/j.robot.2022.104288},
    }

  • H. Dong, X. Chen, S. Särkkä, and C. Stachniss, “Online pole segmentation on range images for long-term LiDAR localization in urban environments,” Robotics and Autonomous Systems, vol. 159, p. 104283, 2023. doi:https://doi.org/10.1016/j.robot.2022.104283
    [BibTeX] [PDF] [Code]
    @article{dong2023jras,
      author  = {H. Dong and X. Chen and S. S{\"a}rkk{\"a} and C. Stachniss},
      title   = {Online pole segmentation on range images for long-term LiDAR localization in urban environments},
      journal = {Robotics and Autonomous Systems},
      year    = {2023},
      volume  = {159},
      pages   = {104283},
      issn    = {0921-8890},
      doi     = {10.1016/j.robot.2022.104283},
      url     = {https://arxiv.org/pdf/2208.07364.pdf},
      codeurl = {https://github.com/PRBonn/pole-localization},
    }

2022

  • X. Chen, “LiDAR-Based Semantic Perception for Autonomous Vehicles,” PhD Thesis, 2022.
    [BibTeX] [PDF]
    @phdthesis{chen2022phd,
      author = {Xieyuanli Chen},
      title  = {{LiDAR-Based Semantic Perception for Autonomous Vehicles}},
      school = {University of Bonn},
      year   = {2022},
      month  = sep,
      url    = {https://hdl.handle.net/20.500.11811/10228},
      urn    = {https://nbn-resolving.org/urn:nbn:de:hbz:5-67873},
    }

  • L. Di Giammarino, L. Brizi, T. Guadagnino, C. Stachniss, and G. Grisetti, “MD-SLAM: Multi-Cue Direct SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{digiammarino2022iros,
      author    = {Di Giammarino, L. and Brizi, L. and Guadagnino, T. and Stachniss, C. and Grisetti, G.},
      title     = {{MD-SLAM: Multi-Cue Direct SLAM}},
      booktitle = iros,
      year      = {2022},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/digiammarino2022iros.pdf},
      codeurl   = {https://github.com/digiamm/md_slam},
    }

  • N. Zimmerman, L. Wiesmann, T. Guadagnino, T. Läbe, J. Behley, and C. Stachniss, “Robust Onboard Localization in Changing Environments Exploiting Text Spotting,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{zimmerman2022iros,
      author    = {N. Zimmerman and L. Wiesmann and T. Guadagnino and T. L\"abe and J. Behley and C. Stachniss},
      title     = {{Robust Onboard Localization in Changing Environments Exploiting Text Spotting}},
      booktitle = iros,
      year      = {2022},
      codeurl   = {https://github.com/PRBonn/tmcl},
    }

  • Y. Pan, Y. Kompis, L. Bartolomei, R. Mascaro, C. Stachniss, and M. Chli, “Voxfield: Non-Projective Signed Distance Fields for Online Planning and 3D Reconstruction,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{pan2022iros,
      author    = {Y. Pan and Y. Kompis and L. Bartolomei and R. Mascaro and C. Stachniss and M. Chli},
      title     = {{Voxfield: Non-Projective Signed Distance Fields for Online Planning and 3D Reconstruction}},
      booktitle = iros,
      year      = {2022},
      codeurl   = {https://github.com/VIS4ROB-lab/voxfield},
      videourl  = {https://youtu.be/JS_yeq-GR4A},
    }

  • J. Rückin, L. Jin, F. Magistri, C. Stachniss, and M. Popović, “Informative Path Planning for Active Learning in Aerial Semantic Mapping,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{rueckin2022iros,
      author    = {J. R{\"u}ckin and L. Jin and F. Magistri and C. Stachniss and M. Popovi\'c},
      title     = {{Informative Path Planning for Active Learning in Aerial Semantic Mapping}},
      booktitle = iros,
      year      = {2022},
      codeurl   = {https://github.com/dmar-bonn/ipp-al},
    }

  • J. Rückin, L. Jin, and M. Popović, “Adaptive Informative Path Planning Using Deep Reinforcement Learning for UAV-based Active Sensing,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{rueckin2022icra,
    author = {R{\"u}ckin, Julius and Jin, Liren and Popovi{\'c}, Marija},
    title = {{Adaptive Informative Path Planning Using Deep Reinforcement Learning for UAV-based Active Sensing}},
    booktitle = icra,
    year = {2022},
    codeurl = {https://github.com/dmar-bonn/ipp-rl},
    }

  • F. Magistri, E. Marks, S. Nagulavancha, I. Vizzo, T. Läbe, J. Behley, M. Halstead, C. McCool, and C. Stachniss, “Contrastive 3D Shape Completion and Reconstruction for Agricultural Robots using RGB-D Frames,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 4, pp. 10120-10127, 2022.
    [BibTeX] [PDF] [Video]
    @article{magistri2022ral-iros,
    author = {Federico Magistri and Elias Marks and Sumanth Nagulavancha and Ignacio Vizzo and Thomas L{\"a}be and Jens Behley and Michael Halstead and Chris McCool and Cyrill Stachniss},
    title = {{Contrastive 3D Shape Completion and Reconstruction for Agricultural Robots using RGB-D Frames}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {4},
    pages = {10120--10127},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/magistri2022ral-iros.pdf},
    videourl = {https://www.youtube.com/watch?v=2ErUf9q7YOI},
    }

  • Y. Goel, N. Vaskevicius, L. Palmieri, N. Chebrolu, and C. Stachniss, “Predicting Dense and Context-aware Cost Maps for Semantic Robot Navigation,” in IROS Workshop on Perception and Navigation for Autonomous Robotics in Unstructured and Dynamic Environments, 2022.
    [BibTeX] [PDF]
    @inproceedings{goel2022irosws,
    author = {Y. Goel and N. Vaskevicius and L. Palmieri and N. Chebrolu and C. Stachniss},
    title = {{Predicting Dense and Context-aware Cost Maps for Semantic Robot Navigation}},
    booktitle = {IROS Workshop on Perception and Navigation for Autonomous Robotics in Unstructured and Dynamic Environments},
    year = {2022},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/goel2022irosws.pdf},
    }

  • I. Vizzo, B. Mersch, R. Marcuzzi, L. Wiesmann, J. Behley, and C. Stachniss, “Make it Dense: Self-Supervised Geometric Scan Completion of Sparse 3D LiDAR Scans in Large Outdoor Environments,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 8534-8541, 2022. doi:10.1109/LRA.2022.3187255
    [BibTeX] [PDF] [Code] [Video]
    @article{vizzo2022ral,
    author = {I. Vizzo and B. Mersch and R. Marcuzzi and L. Wiesmann and J. Behley and C. Stachniss},
    title = {{Make it Dense: Self-Supervised Geometric Scan Completion of Sparse 3D LiDAR Scans in Large Outdoor Environments}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {8534--8541},
    doi = {10.1109/LRA.2022.3187255},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/vizzo2022ral-iros.pdf},
    codeurl = {https://github.com/PRBonn/make_it_dense},
    videourl = {https://youtu.be/NVjURcArHn8},
    }

  • J. Sun, Y. Wang, M. Feng, D. Wang, J. Zhao, C. Stachniss, and X. Chen, “ICK-Track: A Category-Level 6-DoF Pose Tracker Using Inter-Frame Consistent Keypoints for Aerial Manipulation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2022.
    [BibTeX] [PDF] [Code]
    @inproceedings{sun2022iros,
    author = {Jingtao Sun and Yaonan Wang and Mingtao Feng and Danwei Wang and Jiawen Zhao and Cyrill Stachniss and Xieyuanli Chen},
    title = {{ICK-Track: A Category-Level 6-DoF Pose Tracker Using Inter-Frame Consistent Keypoints for Aerial Manipulation}},
    booktitle = iros,
    year = {2022},
    codeurl = {https://github.com/S-JingTao/ICK-Track},
    }

  • L. Nunes, X. Chen, R. Marcuzzi, A. Osep, L. Leal-Taixé, C. Stachniss, and J. Behley, “Unsupervised Class-Agnostic Instance Segmentation of 3D LiDAR Data for Autonomous Vehicles,” IEEE Robotics and Automation Letters (RA-L), 2022. doi:10.1109/LRA.2022.3187872
    [BibTeX] [PDF] [Code] [Video]
    @article{nunes2022ral-3duis,
    author = {Lucas Nunes and Xieyuanli Chen and Rodrigo Marcuzzi and Aljosa Osep and Laura Leal-Taix{\'e} and Cyrill Stachniss and Jens Behley},
    title = {{Unsupervised Class-Agnostic Instance Segmentation of 3D LiDAR Data for Autonomous Vehicles}},
    journal = ral,
    year = {2022},
    doi = {10.1109/LRA.2022.3187872},
    url = {https://www.ipb.uni-bonn.de/pdfs/nunes2022ral-iros.pdf},
    codeurl = {https://github.com/PRBonn/3DUIS},
    videourl = {https://youtu.be/cgv0wUaqLAE},
    }

  • B. Mersch, X. Chen, I. Vizzo, L. Nunes, J. Behley, and C. Stachniss, “Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, p. 7503–7510, 2022. doi:10.1109/LRA.2022.3183245
    [BibTeX] [PDF] [Code] [Video]
    @article{mersch2022ral,
    author = {B. Mersch and X. Chen and I. Vizzo and L. Nunes and J. Behley and C. Stachniss},
    title = {{Receding Moving Object Segmentation in 3D LiDAR Data Using Sparse 4D Convolutions}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {7503--7510},
    doi = {10.1109/LRA.2022.3183245},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/mersch2022ral.pdf},
    codeurl = {https://github.com/PRBonn/4DMOS},
    videourl = {https://youtu.be/5aWew6caPNQ},
    }

  • T. Guadagnino, X. Chen, M. Sodano, J. Behley, G. Grisetti, and C. Stachniss, “Fast Sparse LiDAR Odometry Using Self-Supervised Feature Selection on Intensity Images,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 7597-7604, 2022. doi:10.1109/LRA.2022.3184454
    [BibTeX] [PDF]
    @article{guadagnino2022ral,
    author = {T. Guadagnino and X. Chen and M. Sodano and J. Behley and G. Grisetti and C. Stachniss},
    title = {{Fast Sparse LiDAR Odometry Using Self-Supervised Feature Selection on Intensity Images}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {7597--7604},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3184454},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/guadagnino2022ral-iros.pdf},
    }

  • L. Wiesmann, T. Guadagnino, I. Vizzo, G. Grisetti, J. Behley, and C. Stachniss, “DCPCR: Deep Compressed Point Cloud Registration in Large-Scale Outdoor Environments,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 6327-6334, 2022. doi:10.1109/LRA.2022.3171068
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2022ral-iros,
    author = {L. Wiesmann and T. Guadagnino and I. Vizzo and G. Grisetti and J. Behley and C. Stachniss},
    title = {{DCPCR: Deep Compressed Point Cloud Registration in Large-Scale Outdoor Environments}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {6327--6334},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3171068},
    codeurl = {https://github.com/PRBonn/DCPCR},
    videourl = {https://youtu.be/RqLr2RTGy1s},
    }

  • L. Peters, D. Fridovich-Keil, L. Ferranti, C. Stachniss, J. Alonso-Mora, and F. Laine, “Learning Mixed Strategies in Trajectory Games,” in Proc. of Robotics: Science and Systems (RSS), 2022.
    [BibTeX] [PDF]
    @inproceedings{peters2022rss,
    author = {L. Peters and D. Fridovich-Keil and L. Ferranti and C. Stachniss and J. Alonso-Mora and F. Laine},
    title = {{Learning Mixed Strategies in Trajectory Games}},
    booktitle = rss,
    year = {2022},
    url = {https://arxiv.org/pdf/2205.00291},
    }

  • X. Chen, B. Mersch, L. Nunes, R. Marcuzzi, I. Vizzo, J. Behley, and C. Stachniss, “Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, pp. 6107-6114, 2022. doi:10.1109/LRA.2022.3166544
    [BibTeX] [PDF] [Code] [Video]
    @article{chen2022ral,
    author = {X. Chen and B. Mersch and L. Nunes and R. Marcuzzi and I. Vizzo and J. Behley and C. Stachniss},
    title = {{Automatic Labeling to Generate Training Data for Online LiDAR-Based Moving Object Segmentation}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {6107--6114},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3166544},
    url = {https://arxiv.org/pdf/2201.04501},
    codeurl = {https://github.com/PRBonn/auto-mos},
    videourl = {https://youtu.be/3V5RA1udL4c},
    }

  • S. Yang, L. Zheng, X. Chen, L. Zabawa, M. Zhang, and M. Wang, “Transfer Learning from Synthetic In-vitro Soybean Pods Dataset for In-situ Segmentation of On-branch Soybean Pod,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition Workshops, 2022, pp. 1666-1675.
    [BibTeX] [PDF]
    @inproceedings{yang2022cvprws,
    author = {Yang, Si and Zheng, Lihua and Chen, Xieyuanli and Zabawa, Laura and Zhang, Man and Wang, Minjuan},
    title = {{Transfer Learning from Synthetic In-vitro Soybean Pods Dataset for In-situ Segmentation of On-branch Soybean Pod}},
    booktitle = {Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition Workshops},
    year = {2022},
    pages = {1666--1675},
    url = {https://openaccess.thecvf.com/content/CVPR2022W/AgriVision/papers/Yang_Transfer_Learning_From_Synthetic_In-Vitro_Soybean_Pods_Dataset_for_In-Situ_CVPRW_2022_paper.pdf},
    }

  • I. Vizzo, T. Guadagnino, J. Behley, and C. Stachniss, “VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data,” Sensors, vol. 22, iss. 3, 2022. doi:10.3390/s22031296
    [BibTeX] [PDF] [Code]
    @article{vizzo2022sensors,
    author = {Vizzo, I. and Guadagnino, T. and Behley, J. and Stachniss, C.},
    title = {{VDBFusion: Flexible and Efficient TSDF Integration of Range Sensor Data}},
    journal = {Sensors},
    year = {2022},
    volume = {22},
    number = {3},
    article-number = {1296},
    issn = {1424-8220},
    doi = {10.3390/s22031296},
    url = {https://www.mdpi.com/1424-8220/22/3/1296},
    codeurl = {https://github.com/PRBonn/vdbfusion},
    }

  • L. Wiesmann, R. Marcuzzi, C. Stachniss, and J. Behley, “Retriever: Point Cloud Retrieval in Compressed 3D Maps,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022.
    [BibTeX] [PDF]
    @inproceedings{wiesmann2022icra,
    title = {{Retriever: Point Cloud Retrieval in Compressed 3D Maps}},
    author = {L. Wiesmann and R. Marcuzzi and C. Stachniss and J. Behley},
    booktitle = icra,
    year = {2022},
    }

  • E. Marks, F. Magistri, and C. Stachniss, “Precise 3D Reconstruction of Plants from UAV Imagery Combining Bundle Adjustment and Template Matching,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2022.
    [BibTeX] [PDF]
    @inproceedings{marks2022icra,
    title = {{Precise 3D Reconstruction of Plants from UAV Imagery Combining Bundle Adjustment and Template Matching}},
    author = {E. Marks and F. Magistri and C. Stachniss},
    booktitle = icra,
    year = {2022},
    }

  • J. Weyler, J. Quakernack, P. Lottes, J. Behley, and C. Stachniss, “Joint Plant and Leaf Instance Segmentation on Field-Scale UAV Imagery,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 3787-3794, 2022. doi:10.1109/LRA.2022.3147462
    [BibTeX] [PDF]
    @article{weyler2022ral,
    author = {J. Weyler and J. Quakernack and P. Lottes and J. Behley and C. Stachniss},
    title = {{Joint Plant and Leaf Instance Segmentation on Field-Scale UAV Imagery}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {2},
    pages = {3787--3794},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3147462},
    }

  • L. Nunes, R. Marcuzzi, X. Chen, J. Behley, and C. Stachniss, “SegContrast: 3D Point Cloud Feature Representation Learning through Self-supervised Segment Discrimination,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 2116-2123, 2022. doi:10.1109/LRA.2022.3142440
    [BibTeX] [PDF] [Code] [Video]
    @article{nunes2022ral,
    author = {L. Nunes and R. Marcuzzi and X. Chen and J. Behley and C. Stachniss},
    title = {{SegContrast: 3D Point Cloud Feature Representation Learning through Self-supervised Segment Discrimination}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {2},
    pages = {2116--2123},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3142440},
    url = {https://www.ipb.uni-bonn.de/pdfs/nunes2022ral-icra.pdf},
    codeurl = {https://github.com/PRBonn/segcontrast},
    videourl = {https://youtu.be/kotRb_ySnIw},
    }

  • R. Marcuzzi, L. Nunes, L. Wiesmann, I. Vizzo, J. Behley, and C. Stachniss, “Contrastive Instance Association for 4D Panoptic Segmentation using Sequences of 3D LiDAR Scans,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 1550-1557, 2022. doi:10.1109/LRA.2022.3140439
    [BibTeX] [PDF]
    @article{marcuzzi2022ral,
    author = {R. Marcuzzi and L. Nunes and L. Wiesmann and I. Vizzo and J. Behley and C. Stachniss},
    title = {{Contrastive Instance Association for 4D Panoptic Segmentation using Sequences of 3D LiDAR Scans}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {2},
    pages = {1550--1557},
    issn = {2377-3766},
    doi = {10.1109/LRA.2022.3140439},
    }

  • J. Weyler, F. Magistri, P. Seitz, J. Behley, and C. Stachniss, “In-Field Phenotyping Based on Crop Leaf and Plant Instance Segmentation,” in Proc. of the Winter Conf. on Applications of Computer Vision (WACV), 2022.
    [BibTeX] [PDF]
    @inproceedings{weyler2022wacv,
    title = {{In-Field Phenotyping Based on Crop Leaf and Plant Instance Segmentation}},
    author = {J. Weyler and F. Magistri and P. Seitz and J. Behley and C. Stachniss},
    booktitle = wacv,
    year = {2022},
    }

  • S. Li, X. Chen, Y. Liu, D. Dai, C. Stachniss, and J. Gall, “Multi-scale Interaction for Real-time LiDAR Data Segmentation on an Embedded Platform,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 2, pp. 738-745, 2022. doi:10.1109/LRA.2021.3132059
    [BibTeX] [PDF] [Code] [Video]
    @article{li2022ral,
    author = {S. Li and X. Chen and Y. Liu and D. Dai and C. Stachniss and J. Gall},
    title = {{Multi-scale Interaction for Real-time LiDAR Data Segmentation on an Embedded Platform}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {2},
    pages = {738--745},
    issn = {2377-3766},
    doi = {10.1109/LRA.2021.3132059},
    codeurl = {https://github.com/sj-li/MINet},
    videourl = {https://youtu.be/WDhtz5tZ5vQ},
    }

  • L. Jin, J. Rückin, S. H. Kiss, T. Vidal-Calleja, and M. Popović, “Adaptive-resolution field mapping using Gaussian process fusion with integral kernels,” IEEE Robotics and Automation Letters (RA-L), vol. 7, iss. 3, p. 7471–7478, 2022.
    [BibTeX] [PDF] [Code]
    @article{jin2022ral,
    author = {Jin, Liren and R{\"u}ckin, Julius and Kiss, Stefan H and Vidal-Calleja, Teresa and Popovi{\'c}, Marija},
    title = {{Adaptive-resolution field mapping using Gaussian process fusion with integral kernels}},
    journal = ral,
    year = {2022},
    volume = {7},
    number = {3},
    pages = {7471--7478},
    codeurl = {https://github.com/dmar-bonn/argpf_mapping},
    }

2021

  • K. Schindler and W. Förstner, “Photogrammetry,” in Computer Vision, A Reference Guide, 2nd Edition, K. Ikeuchi, Ed., , 2021. doi:10.1007/978-3-030-63416-2
    [BibTeX] [PDF]

    This comprehensive reference provides easy access to relevant information on all aspects of Computer Vision. An A-Z format of over 240 entries offers a diverse range of topics for those seeking entry into any aspect within the broad field of Computer Vision. Over 200 Authors from both industry and academia contributed to this volume. Each entry includes synonyms, a definition and discussion of the topic, and a robust bibliography. Extensive cross-references to other entries support efficient, user-friendly searches for immediate access to relevant information. Entries were peer-reviewed by a distinguished international advisory board, both scientifically and geographically diverse, ensuring balanced coverage. Over 3700 bibliographic references for further reading enable deeper exploration into any of the topics covered. The content of Computer Vision: A Reference Guide is expository and tutorial, making the book a practical resource for students who are considering entering the field, as well as professionals in other fields who need to access this vital information but may not have the time to work their way through an entire text on their topic of interest.

    @incollection{schindler2021inbook,
    author = {Konrad Schindler and Wolfgang F{\"{o}}rstner},
    title = {Photogrammetry},
    booktitle = {{Computer Vision, {A} Reference Guide, 2nd Edition}},
    editor = {Ikeuchi, K.},
    abstract = {This comprehensive reference provides easy access to relevant information on all aspects of Computer Vision. An A-Z format of over 240 entries offers a diverse range of topics for those seeking entry into any aspect within the broad field of Computer Vision. Over 200 Authors from both industry and academia contributed to this volume. Each entry includes synonyms, a definition and discussion of the topic, and a robust bibliography. Extensive cross-references to other entries support efficient, user-friendly searches for immediate access to relevant information. Entries were peer-reviewed by a distinguished international advisory board, both scientifically and geographically diverse, ensuring balanced coverage. Over 3700 bibliographic references for further reading enable deeper exploration into any of the topics covered. The content of Computer Vision: A Reference Guide is expository and tutorial, making the book a practical resource for students who are considering entering the field, as well as professionals in other fields who need to access this vital information but may not have the time to work their way through an entire text on their topic of interest.},
    http = {https://link.springer.com/content/pdf/bfm%3A978-3-030-63416-2%2F1.pdf},
    doi = {10.1007/978-3-030-63416-2},
    pages = {968--970},
    year = {2021},
    }

  • H. Kuang, Y. Zhu, Z. Zhang, X. Li, J. Tighe, S. Schwertfeger, C. Stachniss, and M. Li, “Video Contrastive Learning With Global Context,” in Proc. of the Intl. Conf. on Computer Vision Workshops, 2021, pp. 3195-3204.
    [BibTeX] [PDF] [Code]
    @inproceedings{kuang2021iccvws,
    author = {Kuang, Haofei and Zhu, Yi and Zhang, Zhi and Li, Xinyu and Tighe, Joseph and Schwertfeger, S\"oren and Stachniss, Cyrill and Li, Mu},
    title = {{Video Contrastive Learning With Global Context}},
    booktitle = iccvws,
    year = {2021},
    pages = {3195--3204},
    codeurl = {https://github.com/amazon-research/video-contrastive-learning},
    url = {https://openaccess.thecvf.com/content/ICCV2021W/CVEU/papers/Kuang_Video_Contrastive_Learning_With_Global_Context_ICCVW_2021_paper.pdf},
    }

  • A. Barreto, P. Lottes, F. R. Ispizua, S. Baumgarten, N. A. Wolf, C. Stachniss, A. -K. Mahlein, and S. Paulus, “Automatic UAV-based counting of seedlings in sugar-beet field and extension to maize and strawberry,” Computers and Electronics in Agriculture, 2021.
    [BibTeX] [PDF]
    @article{barreto2021cea,
    author = {A. Barreto and P. Lottes and F.R. Ispizua and S. Baumgarten and N.A. Wolf and C. Stachniss and A.-K. Mahlein and S. Paulus},
    title = {Automatic UAV-based counting of seedlings in sugar-beet field and extension to maize and strawberry},
    journal = {Computers and Electronics in Agriculture},
    year = {2021},
    }

  • B. Mersch, X. Chen, J. Behley, and C. Stachniss, “Self-supervised Point Cloud Prediction Using 3D Spatio-temporal Convolutional Networks,” in Proc. of the Conf. on Robot Learning (CoRL), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{mersch2021corl,
    title = {{Self-supervised Point Cloud Prediction Using 3D Spatio-temporal Convolutional Networks}},
    author = {B. Mersch and X. Chen and J. Behley and C. Stachniss},
    booktitle = corl,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/mersch2021corl.pdf},
    codeurl = {https://github.com/PRBonn/point-cloud-prediction},
    videourl = {https://youtu.be/-pSZpPgFAso},
    }

  • J. Behley, M. Garbade, A. Milioto, J. Quenzel, S. Behnke, J. Gall, and C. Stachniss, “Towards 3D LiDAR-based semantic scene understanding of 3D point cloud sequences: The SemanticKITTI Dataset,” Intl. Journal of Robotics Research (IJRR), vol. 40, iss. 8-9, pp. 959-967, 2021. doi:10.1177/02783649211006735
    [BibTeX] [PDF]
    @article{behley2021ijrr,
    author = {J. Behley and M. Garbade and A. Milioto and J. Quenzel and S. Behnke and J. Gall and C. Stachniss},
    title = {{Towards 3D LiDAR-based semantic scene understanding of 3D point cloud sequences: The SemanticKITTI Dataset}},
    journal = ijrr,
    year = {2021},
    volume = {40},
    number = {8-9},
    pages = {959--967},
    doi = {10.1177/02783649211006735},
    url = {https://www.ipb.uni-bonn.de/pdfs/behley2021ijrr.pdf},
    }

  • A. Pretto, S. Aravecchia, W. Burgard, N. Chebrolu, C. Dornhege, T. Falck, F. Fleckenstein, A. Fontenla, M. Imperoli, R. Khanna, F. Liebisch, P. Lottes, A. Milioto, D. Nardi, S. Nardi, J. Pfeifer, M. Popovic, C. Potena, C. Pradalier, E. Rothacker-Feder, I. Sa, A. Schaefer, R. Siegwart, C. Stachniss, A. Walter, V. Winterhalter, X. Wu, and J. Nieto, “Building an Aerial-Ground Robotics System for Precision Farming: An Adaptable Solution,” IEEE Robotics & Automation Magazine, vol. 28, iss. 3, 2021.
    [BibTeX] [PDF]
    @article{pretto2021ram,
    title = {{Building an Aerial-Ground Robotics System for Precision Farming: An Adaptable Solution}},
    author = {A. Pretto and S. Aravecchia and W. Burgard and N. Chebrolu and C. Dornhege and T. Falck and F. Fleckenstein and A. Fontenla and M. Imperoli and R. Khanna and F. Liebisch and P. Lottes and A. Milioto and D. Nardi and S. Nardi and J. Pfeifer and M. Popovic and C. Potena and C. Pradalier and E. Rothacker-Feder and I. Sa and A. Schaefer and R. Siegwart and C. Stachniss and A. Walter and V. Winterhalter and X. Wu and J. Nieto},
    journal = ram,
    year = {2021},
    volume = {28},
    number = {3},
    url = {https://www.ipb.uni-bonn.de/pdfs/pretto2021ram.pdf},
    }

  • D. Schunck, F. Magistri, R. A. Rosu, A. Cornelißen, N. Chebrolu, S. Paulus, J. Léon, S. Behnke, C. Stachniss, H. Kuhlmann, and L. Klingbeil, “Pheno4D: A spatio-temporal dataset of maize and tomato plant point clouds for phenotyping and advanced plant analysis,” PLoS ONE, vol. 16, iss. 8, pp. 1-18, 2021. doi:10.1371/journal.pone.0256340
    [BibTeX] [PDF]

    Understanding the growth and development of individual plants is of central importance in modern agriculture, crop breeding, and crop science. To this end, using 3D data for plant analysis has gained attention over the last years. High-resolution point clouds offer the potential to derive a variety of plant traits, such as plant height, biomass, as well as the number and size of relevant plant organs. Periodically scanning the plants even allows for performing spatio-temporal growth analysis. However, highly accurate 3D point clouds from plants recorded at different growth stages are rare, and acquiring this kind of data is costly. Besides, advanced plant analysis methods from machine learning require annotated training data and thus generate intense manual labor before being able to perform an analysis. To address these issues, we present with this dataset paper a multi-temporal dataset featuring high-resolution registered point clouds of maize and tomato plants, which we manually labeled for computer vision tasks, such as for instance segmentation and 3D reconstruction, providing approximately 260 million labeled 3D points. To highlight the usability of the data and to provide baselines for other researchers, we show a variety of applications ranging from point cloud segmentation to non-rigid registration and surface reconstruction. We believe that our dataset will help to develop new algorithms to advance the research for plant phenotyping, 3D reconstruction, non-rigid registration, and deep learning on raw point clouds. The dataset is freely accessible at https://www.ipb.uni-bonn.de/data/pheno4d/.

    @article{schunck2021plosone,
    author = {D. Schunck and F. Magistri and R.A. Rosu and A. Corneli{\ss}en and N. Chebrolu and S. Paulus and J. L\'eon and S. Behnke and C. Stachniss and H. Kuhlmann and L. Klingbeil},
    title = {{Pheno4D: A spatio-temporal dataset of maize and tomato plant point clouds for phenotyping and advanced plant analysis}},
    journal = plosone,
    year = {2021},
    volume = {16},
    number = {8},
    pages = {1--18},
    doi = {10.1371/journal.pone.0256340},
    url = {https://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0256340&type=printable},
    abstract = {Understanding the growth and development of individual plants is of central importance in modern agriculture, crop breeding, and crop science. To this end, using 3D data for plant analysis has gained attention over the last years. High-resolution point clouds offer the potential to derive a variety of plant traits, such as plant height, biomass, as well as the number and size of relevant plant organs. Periodically scanning the plants even allows for performing spatio-temporal growth analysis. However, highly accurate 3D point clouds from plants recorded at different growth stages are rare, and acquiring this kind of data is costly. Besides, advanced plant analysis methods from machine learning require annotated training data and thus generate intense manual labor before being able to perform an analysis. To address these issues, we present with this dataset paper a multi-temporal dataset featuring high-resolution registered point clouds of maize and tomato plants, which we manually labeled for computer vision tasks, such as for instance segmentation and 3D reconstruction, providing approximately 260 million labeled 3D points. To highlight the usability of the data and to provide baselines for other researchers, we show a variety of applications ranging from point cloud segmentation to non-rigid registration and surface reconstruction. We believe that our dataset will help to develop new algorithms to advance the research for plant phenotyping, 3D reconstruction, non-rigid registration, and deep learning on raw point clouds. The dataset is freely accessible at https://www.ipb.uni-bonn.de/data/pheno4d/.},
    }

  • F. Stache, J. Westheider, F. Magistri, M. Popović, and C. Stachniss, “Adaptive Path Planning for UAV-based Multi-Resolution Semantic Segmentation,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2021.
    [BibTeX] [PDF]
    @inproceedings{stache2021ecmr,
    title = {{Adaptive Path Planning for UAV-based Multi-Resolution Semantic Segmentation}},
    author = {F. Stache and J. Westheider and F. Magistri and M. Popovi\'c and C. Stachniss},
    booktitle = ecmr,
    year = {2021},
    }

  • M. Arora, L. Wiesmann, X. Chen, and C. Stachniss, “Mapping the Static Parts of Dynamic Scenes from 3D LiDAR Point Clouds Exploiting Ground Segmentation,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2021.
    [BibTeX] [PDF] [Code]
    @inproceedings{arora2021ecmr,
    title = {{Mapping the Static Parts of Dynamic Scenes from 3D LiDAR Point Clouds Exploiting Ground Segmentation}},
    author = {M. Arora and L. Wiesmann and X. Chen and C. Stachniss},
    booktitle = ecmr,
    year = {2021},
    codeurl = {https://github.com/humbletechy/Dynamic-Point-Removal},
    }

  • H. Dong, X. Chen, and C. Stachniss, “Online Range Image-based Pole Extractor for Long-term LiDAR Localization in Urban Environments,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2021.
    [BibTeX] [PDF] [Code]
    @inproceedings{dong2021ecmr,
    title = {{Online Range Image-based Pole Extractor for Long-term LiDAR Localization in Urban Environments}},
    author = {H. Dong and X. Chen and C. Stachniss},
    booktitle = ecmr,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/dong2021ecmr.pdf},
    codeurl = {https://github.com/PRBonn/pole-localization},
    }

  • X. Chen, T. Läbe, A. Milioto, T. Röhling, J. Behley, and C. Stachniss, “OverlapNet: A Siamese Network for Computing LiDAR Scan Similarity with Applications to Loop Closing and Localization,” Autonomous Robots, vol. 46, p. 61–81, 2021. doi:10.1007/s10514-021-09999-0
    [BibTeX] [PDF] [Code]
    @article{chen2021auro,
    title = {{OverlapNet: A Siamese Network for Computing LiDAR Scan Similarity with Applications to Loop Closing and Localization}},
    author = {X. Chen and T. L\"abe and A. Milioto and T. R\"ohling and J. Behley and C. Stachniss},
    journal = {Autonomous Robots},
    year = {2021},
    volume = {46},
    pages = {61--81},
    issn = {1573-7527},
    doi = {10.1007/s10514-021-09999-0},
    url = {https://www.ipb.uni-bonn.de/pdfs/chen2021auro.pdf},
    codeurl = {https://github.com/PRBonn/OverlapNet},
    }

  • L. Di Giammarino, I. Aloise, C. Stachniss, and G. Grisetti, “Visual Place Recognition using LiDAR Intensity Information,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF]
    @inproceedings{digiammarino2021iros,
    title = {{Visual Place Recognition using LiDAR Intensity Information}},
    author = {Di Giammarino, L. and I. Aloise and C. Stachniss and G. Grisetti},
    booktitle = iros,
    year = {2021},
    }

  • P. Rottmann, T. Posewsky, A. Milioto, C. Stachniss, and J. Behley, “Improving Monocular Depth Estimation by Semantic Pre-training,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF]
    @inproceedings{rottmann2021iros,
    author = {P. Rottmann and T. Posewsky and A. Milioto and C. Stachniss and J. Behley},
    title = {{Improving Monocular Depth Estimation by Semantic Pre-training}},
    booktitle = iros,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/rottmann2021iros.pdf},
    }

  • B. Mersch, T. Höllen, K. Zhao, C. Stachniss, and R. Roscher, “Maneuver-based Trajectory Prediction for Self-driving Cars Using Spatio-temporal Convolutional Networks,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF] [Video]
    @inproceedings{mersch2021iros,
    author = {B. Mersch and T. H\"ollen and K. Zhao and C. Stachniss and R. Roscher},
    title = {{Maneuver-based Trajectory Prediction for Self-driving Cars Using Spatio-temporal Convolutional Networks}},
    booktitle = iros,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/mersch2021iros.pdf},
    videourl = {https://youtu.be/5RRGWUn4qAw},
    }

  • M. Zhou, X. Chen, N. Samano, C. Stachniss, and A. Calway, “Efficient Localisation Using Images and OpenStreetMaps,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2021.
    [BibTeX] [PDF]
    @inproceedings{zhou2021iros,
    author = {Zhou, Mengjie and Chen, Xieyuanli and Samano, Noe and Stachniss, Cyrill and Calway, Andrew},
    title = {Efficient Localisation Using Images and OpenStreetMaps},
    booktitle = iros,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/zhou2021iros.pdf},
    }

  • C. Shi, X. Chen, K. Huang, J. Xiao, H. Lu, and C. Stachniss, “Keypoint Matching for Point Cloud Registration using Multiplex Dynamic Graph Attention Networks,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 8221-8228, 2021. doi:10.1109/LRA.2021.3097275
    [BibTeX] [PDF]
    @article{shi2021ral,
    author = {C. Shi and X. Chen and K. Huang and J. Xiao and H. Lu and C. Stachniss},
    title = {{Keypoint Matching for Point Cloud Registration using Multiplex Dynamic Graph Attention Networks}},
    journal = ral,
    year = {2021},
    volume = {6},
    number = {4},
    pages = {8221--8228},
    issn = {2377-3766},
    doi = {10.1109/LRA.2021.3097275},
    }

  • X. Chen, S. Li, B. Mersch, L. Wiesmann, J. Gall, J. Behley, and C. Stachniss, “Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 6529-6536, 2021. doi:10.1109/LRA.2021.3093567
    [BibTeX] [PDF] [Code] [Video]
    @article{chen2021ral,
    author = {X. Chen and S. Li and B. Mersch and L. Wiesmann and J. Gall and J. Behley and C. Stachniss},
    title = {{Moving Object Segmentation in 3D LiDAR Data: A Learning-based Approach Exploiting Sequential Data}},
    journal = ral,
    year = {2021},
    volume = {6},
    number = {4},
    pages = {6529-6536},
    doi = {10.1109/LRA.2021.3093567},
    issn = {2377-3766},
    url = {https://www.ipb.uni-bonn.de/pdfs/chen2021ral-iros.pdf},
    codeurl = {https://github.com/PRBonn/LiDAR-MOS},
    videourl = {https://youtu.be/NHvsYhk4dhw},
    }

  • N. Chebrolu, “Spatio-Temporal Registration Techniques for Agricultural Robots,” PhD Thesis, 2021.
    [BibTeX] [PDF]
    @phdthesis{chebrolu2021phd,
    author = {N. Chebrolu},
    title = {Spatio-Temporal Registration Techniques for Agricultural Robots},
    school = {University of Bonn},
    year = {2021},
    url = {https://hdl.handle.net/20.500.11811/9166},
    }

  • L. Peters, D. Fridovich-Keil, V. Rubies-Royo, C. J. Tomlin, and C. Stachniss, “Inferring Objectives in Continuous Dynamic Games from Noise-Corrupted Partial State Observations,” in Proc. of Robotics: Science and Systems (RSS), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{peters2021rss,
    author = {Peters, Lasse and Fridovich-Keil, David and Rubies-Royo, Vicenc and Tomlin, Claire J. and Stachniss, Cyrill},
    title = {Inferring Objectives in Continuous Dynamic Games from Noise-Corrupted Partial State Observations},
    booktitle = rss,
    year = {2021},
    url = {https://arxiv.org/abs/2106.03611},
    codeurl = {https://github.com/PRBonn/PartiallyObservedInverseGames.jl},
    videourl = {https://www.youtube.com/watch?v=BogCsYQX9Pc},
    }

  • M. Aygün, A. Osep, M. Weber, M. Maximov, C. Stachniss, J. Behley, and L. Leal-Taixe, “4D Panoptic Segmentation,” in Proc. of the IEEE/CVF Conf. on Computer Vision and Pattern Recognition (CVPR), 2021.
    [BibTeX] [PDF]
    @inproceedings{ayguen2021cvpr,
    author = {M. Ayg\"un and A. Osep and M. Weber and M. Maximov and C. Stachniss and J. Behley and L. Leal-Taixe},
    title = {{4D Panoptic Segmentation}},
    booktitle = cvpr,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/ayguen2021cvpr.pdf},
    }

  • F. Magistri, N. Chebrolu, J. Behley, and C. Stachniss, “Towards In-Field Phenotyping Exploiting Differentiable Rendering with Self-Consistency Loss,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Video]
    @inproceedings{magistri2021icra,
    author = {F. Magistri and N. Chebrolu and J. Behley and C. Stachniss},
    title = {{Towards In-Field Phenotyping Exploiting Differentiable Rendering with Self-Consistency Loss}},
    booktitle = icra,
    year = {2021},
    videourl = {https://youtu.be/MF2A4ihY2lE},
    }

  • I. Vizzo, X. Chen, N. Chebrolu, J. Behley, and C. Stachniss, “Poisson Surface Reconstruction for LiDAR Odometry and Mapping,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{vizzo2021icra,
    author = {I. Vizzo and X. Chen and N. Chebrolu and J. Behley and C. Stachniss},
    title = {{Poisson Surface Reconstruction for LiDAR Odometry and Mapping}},
    booktitle = icra,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/vizzo2021icra.pdf},
    codeurl = {https://github.com/PRBonn/puma},
    videourl = {https://youtu.be/7yWtYWaO5Nk},
    }

  • X. Chen, I. Vizzo, T. Läbe, J. Behley, and C. Stachniss, “Range Image-based LiDAR Localization for Autonomous Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2021icra,
    author = {X. Chen and I. Vizzo and T. L{\"a}be and J. Behley and C. Stachniss},
    title = {{Range Image-based LiDAR Localization for Autonomous Vehicles}},
    booktitle = icra,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/chen2021icra.pdf},
    codeurl = {https://github.com/PRBonn/range-mcl},
    videourl = {https://youtu.be/hpOPXX9oPqI},
    }

  • A. Reinke, X. Chen, and C. Stachniss, “Simple But Effective Redundant Odometry for Autonomous Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{reinke2021icra,
    author = {A. Reinke and X. Chen and C. Stachniss},
    title = {{Simple But Effective Redundant Odometry for Autonomous Vehicles}},
    booktitle = icra,
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/reinke2021icra.pdf},
    codeurl = {https://github.com/PRBonn/MutiverseOdometry},
    videourl = {https://youtu.be/zLpnPEyDKfM},
    }

  • J. Behley, A. Milioto, and C. Stachniss, “A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2021.
    [BibTeX] [PDF]
    @inproceedings{behley2021icra,
    author = {J. Behley and A. Milioto and C. Stachniss},
    title = {{A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI}},
    booktitle = icra,
    year = {2021},
    }

  • L. Peters, D. Fridovich-Keil, V. Rubies-Royo, C. J. Tomlin, and C. Stachniss, “Cost Inference in Smooth Dynamic Games from Noise-Corrupted Partial State Observations,” in Proc. of the RSS Workshop on Social Robot Navigation, 2021.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{peters2021rssws,
    author = {Peters, Lasse and Fridovich-Keil, David and Rubies-Royo, Vicenc and Tomlin, Claire J. and Stachniss, Cyrill},
    title = {{Cost Inference in Smooth Dynamic Games from Noise-Corrupted Partial State Observations}},
    booktitle = {Proc. of the RSS Workshop on Social Robot Navigation},
    year = {2021},
    url = {https://socialrobotnavigation.github.io/papers/paper13.pdf},
    codeurl = {https://github.com/PRBonn/PartiallyObservedInverseGames.jl},
    videourl = {https://www.youtube.com/watch?v=BogCsYQX9Pc},
    }

  • N. Chebrolu, T. Läbe, O. Vysotska, J. Behley, and C. Stachniss, “Adaptive Robust Kernels for Non-Linear Least Squares Problems,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 2240-2247, 2021. doi:10.1109/LRA.2021.3061331
    [BibTeX] [PDF] [Video]
    @article{chebrolu2021ral,
    author = {N. Chebrolu and T. L\"{a}be and O. Vysotska and J. Behley and C. Stachniss},
    title = {{Adaptive Robust Kernels for Non-Linear Least Squares Problems}},
    journal = ral,
    year = {2021},
    volume = {6},
    number = {2},
    pages = {2240-2247},
    doi = {10.1109/LRA.2021.3061331},
    videourl = {https://youtu.be/34Zp3ZX0Bnk},
    }

  • J. Weyler, A. Milioto, T. Falck, J. Behley, and C. Stachniss, “Joint Plant Instance Detection and Leaf Count Estimation for In-Field Plant Phenotyping,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 3599-3606, 2021. doi:10.1109/LRA.2021.3060712
    [BibTeX] [PDF] [Video]
    @article{weyler2021ral,
    author = {J. Weyler and A. Milioto and T. Falck and J. Behley and C. Stachniss},
    title = {{Joint Plant Instance Detection and Leaf Count Estimation for In-Field Plant Phenotyping}},
    journal = ral,
    year = {2021},
    volume = {6},
    number = {2},
    pages = {3599-3606},
    doi = {10.1109/LRA.2021.3060712},
    videourl = {https://youtu.be/Is18Rey625I},
    }

  • L. Wiesmann, A. Milioto, X. Chen, C. Stachniss, and J. Behley, “Deep Compression for Dense Point Cloud Maps,” IEEE Robotics and Automation Letters (RA-L), vol. 6, pp. 2060-2067, 2021. doi:10.1109/LRA.2021.3059633
    [BibTeX] [PDF] [Code] [Video]
    @article{wiesmann2021ral,
    author = {L. Wiesmann and A. Milioto and X. Chen and C. Stachniss and J. Behley},
    title = {{Deep Compression for Dense Point Cloud Maps}},
    journal = ral,
    year = {2021},
    volume = {6},
    number = {2},
    pages = {2060-2067},
    doi = {10.1109/LRA.2021.3059633},
    url = {https://www.ipb.uni-bonn.de/pdfs/wiesmann2021ral.pdf},
    codeurl = {https://github.com/PRBonn/deep-point-map-compression},
    videourl = {https://youtu.be/fLl9lTlZrI0},
    }

  • N. Chebrolu, F. Magistri, T. Läbe, and C. Stachniss, “Registration of Spatio-Temporal Point Clouds of Plants for Phenotyping,” PLoS ONE, vol. 16, iss. 2, 2021.
    [BibTeX] [PDF] [Video]
    @article{chebrolu2021plosone,
    author = {N. Chebrolu and F. Magistri and T. L{\"a}be and C. Stachniss},
    title = {{Registration of Spatio-Temporal Point Clouds of Plants for Phenotyping}},
    journal = plosone,
    year = {2021},
    volume = {16},
    number = {2},
    videourl = {https://youtu.be/OV39kb5Nqg8},
    }

  • F. Görlich, E. Marks, A. Mahlein, K. König, P. Lottes, and C. Stachniss, “UAV-Based Classification of Cercospora Leaf Spot Using RGB Images,” Drones, vol. 5, iss. 2, 2021. doi:10.3390/drones5020034
    [BibTeX] [PDF]

    Plant diseases can impact crop yield. Thus, the detection of plant diseases using sensors that can be mounted on aerial vehicles is in the interest of farmers to support decision-making in integrated pest management and to breeders for selecting tolerant or resistant genotypes. This paper investigated the detection of Cercospora leaf spot (CLS), caused by Cercospora beticola in sugar beet using RGB imagery. We proposed an approach to tackle the CLS detection problem using fully convolutional neural networks, which operate directly on RGB images captured by a UAV. This efficient approach does not require complex multi- or hyper-spectral sensors, but provides reliable results and high sensitivity. We provided a detection pipeline for pixel-wise semantic segmentation of CLS symptoms, healthy vegetation, and background so that our approach can automatically quantify the grade of infestation. We thoroughly evaluated our system using multiple UAV datasets recorded from different sugar beet trial fields. The dataset consisted of a training and a test dataset and originated from different fields. We used it to evaluate our approach under realistic conditions and analyzed its generalization capabilities to unseen environments. The obtained results correlated to visual estimation by human experts significantly. The presented study underlined the potential of high-resolution RGB imaging and convolutional neural networks for plant disease detection under field conditions. The demonstrated procedure is particularly interesting for applications under practical conditions, as no complex and cost-intensive measuring system is required.

    @article{goerlich2021drones,
    author = {G{\"o}rlich, Florian and Marks, Elias and Mahlein, Anne-Katrin and K{\"o}nig, Kathrin and Lottes, Philipp and Stachniss, Cyrill},
    title = {{UAV-Based Classification of Cercospora Leaf Spot Using RGB Images}},
    journal = {Drones},
    year = {2021},
    volume = {5},
    number = {2},
    article-number = {34},
    url = {https://www.mdpi.com/2504-446X/5/2/34/pdf},
    issn = {2504-446X},
    doi = {10.3390/drones5020034},
    abstract = {Plant diseases can impact crop yield. Thus, the detection of plant diseases using sensors that can be mounted on aerial vehicles is in the interest of farmers to support decision-making in integrated pest management and to breeders for selecting tolerant or resistant genotypes. This paper investigated the detection of Cercospora leaf spot (CLS), caused by Cercospora beticola in sugar beet using RGB imagery. We proposed an approach to tackle the CLS detection problem using fully convolutional neural networks, which operate directly on RGB images captured by a UAV. This efficient approach does not require complex multi- or hyper-spectral sensors, but provides reliable results and high sensitivity. We provided a detection pipeline for pixel-wise semantic segmentation of CLS symptoms, healthy vegetation, and background so that our approach can automatically quantify the grade of infestation. We thoroughly evaluated our system using multiple UAV datasets recorded from different sugar beet trial fields. The dataset consisted of a training and a test dataset and originated from different fields. We used it to evaluate our approach under realistic conditions and analyzed its generalization capabilities to unseen environments. The obtained results correlated to visual estimation by human experts significantly. The presented study underlined the potential of high-resolution RGB imaging and convolutional neural networks for plant disease detection under field conditions. The demonstrated procedure is particularly interesting for applications under practical conditions, as no complex and cost-intensive measuring system is required.},
    }

  • W. Förstner, Bayes-Schätzung und Maximum-Likelihood-Schätzung, 2021.
    [BibTeX] [PDF]

    Das Ziel dieser Notiz ist das Prinzip der Bayes-Schätzung und der Maximum-Likelihood-Schätzung zu erläutern.

    @misc{foerstner2021bayesml,
    author = {W. F{\"o}rstner},
    title = {{Bayes-Sch{\"a}tzung und Maximum-Likelihood-Sch{\"a}tzung}},
    year = {2021},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner2021bayesml.pdf},
    abstract = {Das Ziel dieser Notiz ist das Prinzip der Bayes-Sch{\"a}tzung und der Maximum-Likelihood-Sch{\"a}tzung zu erl{\"a}utern.},
    }

  • C. Carbone, D. Albani, F. Magistri, D. Ognibene, C. Stachniss, G. Kootstra, D. Nardi, and V. Trianni, “Monitoring and Mapping of Crop Fields with UAV Swarms Based on Information Gain,” in Proc. of the Intl. Symp. on Distributed Autonomous Robotic Systems (DARS), 2021.
    [BibTeX] [PDF]
    @inproceedings{carbone2021dars,
    author = {C. Carbone and D. Albani and F. Magistri and D. Ognibene and C. Stachniss and G. Kootstra and D. Nardi and V. Trianni},
    title = {{Monitoring and Mapping of Crop Fields with UAV Swarms Based on Information Gain}},
    booktitle = dars,
    year = {2021},
    }

  • C. Stachniss, “Achievements Needed for Becoming a Professor,” Academia Letters, iss. 281, 2021. doi:https://doi.org/10.20935/AL281
    [BibTeX] [PDF] [Video]

    What is needed to become a professor? This article summarizes what selection committees often regard as the minimum achievements when recruiting new professors. My goal is to give early-career researchers a brief guideline on their way towards becoming a faculty member.

    @article{stachniss2021al,
    author = {C. Stachniss},
    title = {{Achievements Needed for Becoming a Professor}},
    journal = {Academia Letters},
    year = {2021},
    number = {281},
    doi = {https://doi.org/10.20935/AL281},
    url = {https://www.ipb.uni-bonn.de/pdfs/stachniss2021al.pdf},
    videourl = {https://youtu.be/223cMIgN5p0},
    abstract = {What is needed to become a professor? This article summarizes what selection committees often regard as the minimum achievements when recruiting new professors. My goal is to give early-career researchers a brief guideline on their way towards becoming a faculty member.},
    }

2020

  • D. Barath, M. Polic, W. Förstner, T. Sattler, T. Pajdla, and Z. Kukelova, “Making Affine Correspondences Work in Camera Geometry Computation,” in Computer Vision – ECCV 2020, Cham, 2020, p. 723–740. doi:https://doi.org/10.1007/978-3-030-58621-8_42
    [BibTeX] [PDF]
    @inproceedings{barath2020eccv,
    author = {Barath, Daniel and Polic, Michal and F{\"o}rstner, Wolfgang and Sattler, Torsten and Pajdla, Tomas and Kukelova, Zuzana},
    editor = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael},
    title = {{Making Affine Correspondences Work in Camera Geometry Computation}},
    booktitle = {Computer Vision -- ECCV 2020},
    year = {2020},
    publisher = {Springer International Publishing},
    address = {Cham},
    pages = {723--740},
    isbn = {978-3-030-58621-8},
    doi = {https://doi.org/10.1007/978-3-030-58621-8_42},
    url = {https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123560698.pdf},
    }

  • C. Stachniss, I. Vizzo, L. Wiesmann, and N. Berning, How To Setup and Run a 100% Digital Conf.: DIGICROP 2020, 2020.
    [BibTeX] [PDF]

    The purpose of this record is to document the setup and execution of DIGICROP 2020 and to simplify conducting future online events of that kind. DIGICROP 2020 was a 100% virtual conference run via Zoom with around 900 registered people in November 2020. It consisted of video presentations available via our website and a single-day live event for Q&A. We had around 450 people attending the Q&A session overall, most of the time 200-250 people have been online at the same time. This document is a collection of notes, instructions, and todo lists. It is not a polished manual, however, we believe these notes will be useful for other conference organizers and for us in the future.

    @misc{stachniss2020digitalconf,
    author = {C. Stachniss and I. Vizzo and L. Wiesmann and N. Berning},
    title = {{How To Setup and Run a 100\% Digital Conf.: DIGICROP 2020}},
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/stachniss2020digitalconf.pdf},
    abstract = {The purpose of this record is to document the setup and execution of DIGICROP 2020 and to simplify conducting future online events of that kind. DIGICROP 2020 was a 100\% virtual conference run via Zoom with around 900 registered people in November 2020. It consisted of video presentations available via our website and a single-day live event for Q\&A. We had around 450 people attending the Q\&A session overall, most of the time 200-250 people have been online at the same time. This document is a collection of notes, instructions, and todo lists. It is not a polished manual, however, we believe these notes will be useful for other conference organizers and for us in the future.},
    }

  • A. Milioto, J. Behley, C. McCool, and C. Stachniss, “LiDAR Panoptic Segmentation for Autonomous Driving,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{milioto2020iros,
    author = {A. Milioto and J. Behley and C. McCool and C. Stachniss},
    title = {{LiDAR Panoptic Segmentation for Autonomous Driving}},
    booktitle = iros,
    year = {2020},
    videourl = {https://www.youtube.com/watch?v=C9CTQSosr9I},
    }

  • X. Chen, T. Läbe, L. Nardi, J. Behley, and C. Stachniss, “Learning an Overlap-based Observation Model for 3D LiDAR Localization,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2020iros,
    author = {X. Chen and T. L\"abe and L. Nardi and J. Behley and C. Stachniss},
    title = {{Learning an Overlap-based Observation Model for 3D LiDAR Localization}},
    booktitle = iros,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/chen2020iros.pdf},
    codeurl = {https://github.com/PRBonn/overlap_localization},
    videourl = {https://www.youtube.com/watch?v=BozPqy_6YcE},
    }

  • F. Langer, A. Milioto, A. Haag, J. Behley, and C. Stachniss, “Domain Transfer for Semantic Segmentation of LiDAR Data using Deep Neural Networks,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{langer2020iros,
    author = {F. Langer and A. Milioto and A. Haag and J. Behley and C. Stachniss},
    title = {{Domain Transfer for Semantic Segmentation of LiDAR Data using Deep Neural Networks}},
    booktitle = iros,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/langer2020iros.pdf},
    codeurl = {https://github.com/PRBonn/lidar_transfer},
    videourl = {https://youtu.be/6FNGF4hKBD0},
    }

  • F. Magistri, N. Chebrolu, and C. Stachniss, “Segmentation-Based 4D Registration of Plants Point Clouds for Phenotyping,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{magistri2020iros,
    author = {F. Magistri and N. Chebrolu and C. Stachniss},
    title = {{Segmentation-Based 4D Registration of Plants Point Clouds for Phenotyping}},
    booktitle = iros,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/magistri2020iros.pdf},
    videourl = {https://youtu.be/OV39kb5Nqg8},
    }

  • D. Gogoll, P. Lottes, J. Weyler, N. Petrinic, and C. Stachniss, “Unsupervised Domain Adaptation for Transferring Plant Classification Systems to New Field Environments, Crops, and Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{gogoll2020iros,
    author = {D. Gogoll and P. Lottes and J. Weyler and N. Petrinic and C. Stachniss},
    title = {{Unsupervised Domain Adaptation for Transferring Plant Classification Systems to New Field Environments, Crops, and Robots}},
    booktitle = iros,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/gogoll2020iros.pdf},
    videourl = {https://www.youtube.com/watch?v=6K79Ih6KXTs},
    }

  • X. Chen, T. Läbe, A. Milioto, T. Röhling, O. Vysotska, A. Haag, J. Behley, and C. Stachniss, “OverlapNet: Loop Closing for LiDAR-based SLAM,” in Proc. of Robotics: Science and Systems (RSS), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2020rss,
    author = {X. Chen and T. L\"abe and A. Milioto and T. R\"ohling and O. Vysotska and A. Haag and J. Behley and C. Stachniss},
    title = {{OverlapNet: Loop Closing for LiDAR-based SLAM}},
    booktitle = rss,
    year = {2020},
    codeurl = {https://github.com/PRBonn/OverlapNet/},
    videourl = {https://youtu.be/YTfliBco6aw},
    }

  • W. Förstner, “Symmetric Least Squares Matching – Sym-LSM,” Institut für Photogrammetrie, Universität Bonn 2020.
    [BibTeX] [PDF] [Code]
    @techreport{foerstner2020report-sym-lsm,
    author = {F{\"o}rstner, Wolfgang},
    title = {{Symmetric Least Squares Matching -- Sym-LSM}},
    institution = {Institut f{\"u}r Photogrammetrie, Universit{\"a}t Bonn},
    year = {2020},
    codeurl = {https://www.ipb.uni-bonn.de/symmetric-least-squares-matching},
    }

  • N. Chebrolu, T. Laebe, O. Vysotska, J. Behley, and C. Stachniss, “Adaptive Robust Kernels for Non-Linear Least Squares Problems,” arXiv Preprint, 2020.
    [BibTeX] [PDF]
    @article{chebrolu2020arxiv,
    author = {N. Chebrolu and T. Laebe and O. Vysotska and J. Behley and C. Stachniss},
    title = {{Adaptive Robust Kernels for Non-Linear Least Squares Problems}},
    journal = arxiv,
    year = {2020},
    eprint = {2004.14938},
    keywords = {cs.RO},
    url = {https://arxiv.org/pdf/2004.14938v2},
    }

  • J. Behley, A. Milioto, and C. Stachniss, “A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI,” arXiv Preprint, 2020.
    [BibTeX] [PDF]

    Panoptic segmentation is the recently introduced task that tackles semantic segmentation and instance segmentation jointly. In this paper, we present an extension of SemanticKITTI, which is a large-scale dataset providing dense point-wise semantic labels for all sequences of the KITTI Odometry Benchmark, for training and evaluation of laser-based panoptic segmentation. We provide the data and discuss the processing steps needed to enrich a given semantic annotation with temporally consistent instance information, i.e., instance information that supplements the semantic labels and identifies the same instance over sequences of LiDAR point clouds. Additionally, we present two strong baselines that combine state-of-the-art LiDAR-based semantic segmentation approaches with a state-of-the-art detector enriching the segmentation with instance information and that allow other researchers to compare their approaches against. We hope that our extension of SemanticKITTI with strong baselines enables the creation of novel algorithms for LiDAR-based panoptic segmentation as much as it has for the original semantic segmentation and semantic scene completion tasks. Data, code, and an online evaluation using a hidden test set will be published on https://semantic-kitti.org.

    @article{behley2020arxiv,
    author = {J. Behley and A. Milioto and C. Stachniss},
    title = {{A Benchmark for LiDAR-based Panoptic Segmentation based on KITTI}},
    journal = arxiv,
    year = {2020},
    eprint = {2003.02371v1},
    keywords = {cs.CV},
    url = {https://arxiv.org/pdf/2003.02371v1},
    abstract = {Panoptic segmentation is the recently introduced task that tackles semantic segmentation and instance segmentation jointly. In this paper, we present an extension of SemanticKITTI, which is a large-scale dataset providing dense point-wise semantic labels for all sequences of the KITTI Odometry Benchmark, for training and evaluation of laser-based panoptic segmentation. We provide the data and discuss the processing steps needed to enrich a given semantic annotation with temporally consistent instance information, i.e., instance information that supplements the semantic labels and identifies the same instance over sequences of LiDAR point clouds. Additionally, we present two strong baselines that combine state-of-the-art LiDAR-based semantic segmentation approaches with a state-of-the-art detector enriching the segmentation with instance information and that allow other researchers to compare their approaches against. We hope that our extension of SemanticKITTI with strong baselines enables the creation of novel algorithms for LiDAR-based panoptic segmentation as much as it has for the original semantic segmentation and semantic scene completion tasks. Data, code, and an online evaluation using a hidden test set will be published on https://semantic-kitti.org.},
    }

  • X. Wu, S. Aravecchia, P. Lottes, C. Stachniss, and C. Pradalier, “Robotic Weed Control Using Automated Weed and Crop Classification,” Journal of Field Robotics (JFR), vol. 37, pp. 322-340, 2020.
    [BibTeX] [PDF]
    @article{wu2020jfr,
    author = {X. Wu and S. Aravecchia and P. Lottes and C. Stachniss and C. Pradalier},
    title = {{Robotic Weed Control Using Automated Weed and Crop Classification}},
    journal = jfr,
    year = {2020},
    volume = {37},
    number = {2},
    pages = {322-340},
    url = {https://www.ipb.uni-bonn.de/pdfs/wu2020jfr.pdf},
    }

  • P. Lottes, J. Behley, N. Chebrolu, A. Milioto, and C. Stachniss, “Robust joint stem detection and crop-weed classification using image sequences for plant-specific treatment in precision farming,” Journal of Field Robotics (JFR), vol. 37, pp. 20-34, 2020. doi:https://doi.org/10.1002/rob.21901
    [BibTeX] [PDF]
    @article{lottes2020jfr,
    author = {Lottes, P. and Behley, J. and Chebrolu, N. and Milioto, A. and Stachniss, C.},
    title = {{Robust joint stem detection and crop-weed classification using image sequences for plant-specific treatment in precision farming}},
    journal = jfr,
    year = {2020},
    volume = {37},
    number = {1},
    pages = {20-34},
    doi = {https://doi.org/10.1002/rob.21901},
    url = {https://www.ipb.uni-bonn.de/pdfs/lottes2019jfr.pdf},
    }

  • N. Chebrolu, T. Laebe, and C. Stachniss, “Spatio-Temporal Non-Rigid Registration of 3D Point Clouds of Plants,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{chebrolu2020icra,
    author = {N. Chebrolu and T. Laebe and C. Stachniss},
    title = {Spatio-Temporal Non-Rigid Registration of 3D Point Clouds of Plants},
    booktitle = icra,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/chebrolu2020icra.pdf},
    videourl = {https://www.youtube.com/watch?v=uGkep_aelBc},
    }

  • A. Ahmadi, L. Nardi, N. Chebrolu, and C. Stachniss, “Visual Servoing-based Navigation for Monitoring Row-Crop Fields,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{ahmadi2020icra,
    author = {A. Ahmadi and L. Nardi and N. Chebrolu and C. Stachniss},
    title = {Visual Servoing-based Navigation for Monitoring Row-Crop Fields},
    booktitle = icra,
    year = {2020},
    url = {https://arxiv.org/pdf/1909.12754},
    codeurl = {https://github.com/PRBonn/visual-crop-row-navigation},
    videourl = {https://youtu.be/0qg6n4sshHk},
    }

  • L. Nardi and C. Stachniss, “Long-Term Robot Navigation in Indoor Environments Estimating Patterns in Traversability Changes,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{nardi2020icra,
    author = {L. Nardi and C. Stachniss},
    title = {Long-Term Robot Navigation in Indoor Environments Estimating Patterns in Traversability Changes},
    booktitle = icra,
    year = {2020},
    url = {https://arxiv.org/pdf/1909.12733},
    videourl = {https://www.youtube.com/watch?v=9lNcA3quzwU},
    }

  • R. Sheikh, A. Milioto, P. Lottes, C. Stachniss, M. Bennewitz, and T. Schultz, “Gradient and Log-based Active Learning for Semantic Segmentation of Crop and Weed for Agricultural Robots,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{sheikh2020icra,
    author = {R. Sheikh and A. Milioto and P. Lottes and C. Stachniss and M. Bennewitz and T. Schultz},
    title = {Gradient and Log-based Active Learning for Semantic Segmentation of Crop and Weed for Agricultural Robots},
    booktitle = icra,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/sheikh2020icra.pdf},
    videourl = {https://www.youtube.com/watch?v=NySa59gxFAg},
    }

  • J. Quenzel, R. A. Rosu, T. Laebe, C. Stachniss, and S. Behnke, “Beyond Photometric Consistency: Gradient-based Dissimilarity for Improving Visual Odometry and Stereo Matching,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2020.
    [BibTeX] [PDF] [Video]
    @inproceedings{quenzel020icra,
    author = {J. Quenzel and R.A. Rosu and T. Laebe and C. Stachniss and S. Behnke},
    title = {Beyond Photometric Consistency: Gradient-based Dissimilarity for Improving Visual Odometry and Stereo Matching},
    booktitle = icra,
    year = {2020},
    url = {https://www.ipb.uni-bonn.de/pdfs/quenzel2020icra.pdf},
    videourl = {https://www.youtube.com/watch?v=cqv7k-BK0g0},
    }

  • P. Regier, A. Milioto, C. Stachniss, and M. Bennewitz, “Classifying Obstacles and Exploiting Class Information for Humanoid Navigation Through Cluttered Environments,” The Intl. Journal of Humanoid Robotics (IJHR), vol. 17, iss. 02, p. 2050013, 2020. doi:10.1142/S0219843620500139
    [BibTeX] [PDF]

    Humanoid robots are often supposed to share their workspace with humans and thus have to deal with objects used by humans in their everyday life. In this article, we present our novel approach to humanoid navigation through cluttered environments, which exploits knowledge about different obstacle classes to decide how to deal with obstacles and select appropriate robot actions. To classify objects from RGB images and decide whether an obstacle can be overcome by the robot with a corresponding action, e.g., by pushing or carrying it aside or stepping over or onto it, we train and exploit a convolutional neural network (CNN). Based on associated action costs, we compute a cost grid containing newly observed objects in addition to static obstacles on which a 2D path can be efficiently planned. This path encodes the necessary actions that need to be carried out by the robot to reach the goal. We implemented our framework in the Robot Operating System (ROS) and tested it in various scenarios with a Nao robot as well as in simulation with the REEM-C robot. As the experiments demonstrate, using our CNN, the robot can robustly classify the observed obstacles into the different classes and decide on suitable actions to find efficient solution paths. Our system finds paths also through regions where traditional motion planning methods are not able to calculate a solution or require substantially more time.

    @article{regier2020ijhr,
    author = {Regier, P. and Milioto, A. and Stachniss, C. and Bennewitz, M.},
    title = {{Classifying Obstacles and Exploiting Class Information for Humanoid Navigation Through Cluttered Environments}},
    journal = ijhr,
    volume = {17},
    number = {02},
    pages = {2050013},
    year = {2020},
    doi = {10.1142/S0219843620500139},
    abstract = {Humanoid robots are often supposed to share their workspace with humans and thus have to deal with objects used by humans in their everyday life. In this article, we present our novel approach to humanoid navigation through cluttered environments, which exploits knowledge about different obstacle classes to decide how to deal with obstacles and select appropriate robot actions. To classify objects from RGB images and decide whether an obstacle can be overcome by the robot with a corresponding action, e.g., by pushing or carrying it aside or stepping over or onto it, we train and exploit a convolutional neural network (CNN). Based on associated action costs, we compute a cost grid containing newly observed objects in addition to static obstacles on which a 2D path can be efficiently planned. This path encodes the necessary actions that need to be carried out by the robot to reach the goal. We implemented our framework in the Robot Operating System (ROS) and tested it in various scenarios with a Nao robot as well as in simulation with the REEM-C robot. As the experiments demonstrate, using our CNN, the robot can robustly classify the observed obstacles into the different classes and decide on suitable actions to find efficient solution paths. Our system finds paths also through regions where traditional motion planning methods are not able to calculate a solution or require substantially more time.},
    }

2019

  • E. Palazzolo, “Active 3D Reconstruction for Mobile Robots,” PhD Thesis, 2019.
    [BibTeX] [PDF]
    @PhdThesis{palazzolo2019phd,
    author = {Palazzolo, E.},
    title = {Active {3D} Reconstruction for Mobile Robots},
    year = 2019,
    school = {University of Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2019phd.pdf},
    }

  • J. Behley, M. Garbade, A. Milioto, J. Quenzel, S. Behnke, C. Stachniss, and J. Gall, “SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences,” in Proc. of the IEEE/CVF Intl. Conf. on Computer Vision (ICCV), 2019.
    [BibTeX] [PDF] [Video]
    @inproceedings{behley2019iccv,
    author    = {J. Behley and M. Garbade and A. Milioto and J. Quenzel and S. Behnke and C. Stachniss and J. Gall},
    title     = {{SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences}},
    booktitle = iccv,
    year      = {2019},
    videourl  = {https://www.ipb.uni-bonn.de/html/projects/semantic_kitti/videos/teaser.mp4},
    }

  • O. Vysotska, “Visual Place Recognition in Changing Environments,” PhD Thesis, 2019.
    [BibTeX] [PDF]
    @PhdThesis{vysotska2019phd,
    author = {O. Vysotska},
    title = {Visual Place Recognition in Changing Environments},
    year = 2019,
    school = {University of Bonn},
    url = {https://hss.ulb.uni-bonn.de/2019/5593/5593.pdf},
    }

  • A. Pretto, S. Aravecchia, W. Burgard, N. Chebrolu, C. Dornhege, T. Falck, F. Fleckenstein, A. Fontenla, M. Imperoli, R. Khanna, F. Liebisch, P. Lottes, A. Milioto, D. Nardi, S. Nardi, J. Pfeifer, M. Popović, C. Potena, C. Pradalier, E. Rothacker-Feder, I. Sa, A. Schaefer, R. Siegwart, C. Stachniss, A. Walter, W. Winterhalter, X. Wu, and J. Nieto, “Building an Aerial-Ground Robotics System for Precision Farming,” arXiv Preprint, 2019.
    [BibTeX] [PDF]
    @article{pretto2019arxiv,
    author = {A. Pretto and S. Aravecchia and W. Burgard and N. Chebrolu and C. Dornhege and T. Falck and F. Fleckenstein and A. Fontenla and M. Imperoli and R. Khanna and F. Liebisch and P. Lottes and A. Milioto and D. Nardi and S. Nardi and J. Pfeifer and M. Popovi{\'c} and C. Potena and C. Pradalier and E. Rothacker-Feder and I. Sa and A. Schaefer and R. Siegwart and C. Stachniss and A. Walter and W. Winterhalter and X. Wu and J. Nieto},
    title = {{Building an Aerial-Ground Robotics System for Precision Farming}},
    journal = arxiv,
    year = 2019,
    eprint = {1911.03098v1},
    archiveprefix = {arXiv},
    primaryclass = {cs.RO},
    url = {https://arxiv.org/pdf/1911.03098v1},
    keywords = {cs.RO},
    }

  • O. Vysotska and C. Stachniss, “Effective Visual Place Recognition Using Multi-Sequence Maps,” IEEE Robotics and Automation Letters (RA-L), vol. 4, pp. 1730-1736, 2019.
    [BibTeX] [PDF] [Video]
    @article{vysotska2019ral,
    author = {O. Vysotska and C. Stachniss},
    title = {{Effective Visual Place Recognition Using Multi-Sequence Maps}},
    journal = ral,
    year = 2019,
    volume = {4},
    number = {2},
    pages = {1730-1736},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska2019ral.pdf},
    videourl = {https://youtu.be/wFU0JoXTH3c},
    }

  • E. Palazzolo, J. Behley, P. Lottes, P. Giguère, and C. Stachniss, “ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @InProceedings{palazzolo2019iros,
    author = {E. Palazzolo and J. Behley and P. Lottes and P. Gigu{\`e}re and C. Stachniss},
    title = {{ReFusion: 3D Reconstruction in Dynamic Environments for RGB-D Cameras Exploiting Residuals}},
    booktitle = iros,
    year = {2019},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2019iros.pdf},
    codeurl = {https://github.com/PRBonn/refusion},
    videourl = {https://youtu.be/1P9ZfIS5-p4},
    }

  • X. Chen, A. Milioto, E. Palazzolo, P. Giguère, J. Behley, and C. Stachniss, “SuMa++: Efficient LiDAR-based Semantic SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{chen2019iros,
    author = {X. Chen and A. Milioto and E. Palazzolo and P. Gigu{\`e}re and J. Behley and C. Stachniss},
    title = {{SuMa++: Efficient LiDAR-based Semantic SLAM}},
    booktitle = iros,
    year = 2019,
    codeurl = {https://github.com/PRBonn/semantic_suma/},
    videourl = {https://youtu.be/uo3ZuLuFAzk},
    }

  • A. Milioto, I. Vizzo, J. Behley, and C. Stachniss, “RangeNet++: Fast and Accurate LiDAR Semantic Segmentation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{milioto2019iros,
    author    = {A. Milioto and I. Vizzo and J. Behley and C. Stachniss},
    title     = {{RangeNet++: Fast and Accurate LiDAR Semantic Segmentation}},
    booktitle = iros,
    year      = {2019},
    codeurl   = {https://github.com/PRBonn/lidar-bonnetal},
    videourl  = {https://youtu.be/wuokg7MFZyU},
    }

  • F. Yan, O. Vysotska, and C. Stachniss, “Global Localization on OpenStreetMap Using 4-bit Semantic Descriptors,” in Proc. of the European Conf. on Mobile Robots (ECMR), 2019.
    [BibTeX] [PDF]
    @InProceedings{yan2019ecmr,
    author = {F. Yan and O. Vysotska and C. Stachniss},
    title = {{Global Localization on OpenStreetMap Using 4-bit Semantic Descriptors}},
    booktitle = ecmr,
    year = {2019},
    }

  • L. Zabawa, A. Kicherer, L. Klingbeil, A. Milioto, R. Topfer, H. Kuhlmann, and R. Roscher, “Detection of Single Grapevine Berries in Images Using Fully Convolutional Neural Networks,” in The IEEE Conf. on Computer Vision and Pattern Recognition (CVPR) Workshops, 2019.
    [BibTeX] [PDF]
    @InProceedings{zabawa2019cvpr-workshop,
    author = {L. Zabawa and A. Kicherer and L. Klingbeil and A. Milioto and R. Topfer and H. Kuhlmann and R. Roscher},
    title = {{Detection of Single Grapevine Berries in Images Using Fully Convolutional Neural Networks}},
    booktitle = {The IEEE Conf. on Computer Vision and Pattern Recognition (CVPR) Workshops},
    month = jun,
    year = {2019},
    }

  • O. Vysotska, H. Kuhlmann, and C. Stachniss, “UAVs Towards Sustainable Crop Production,” in Workshop at Robotics: Science and Systems, 2019.
    [BibTeX] [PDF]
    @inproceedings{vysotska2019rsswsabstract,
    author    = {O. Vysotska and H. Kuhlmann and C. Stachniss},
    title     = {{UAVs Towards Sustainable Crop Production}},
    booktitle = {Workshop at Robotics: Science and Systems},
    year      = {2019},
    note      = {Abstract},
    }

  • A. Milioto and C. Stachniss, “Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{milioto2019icra,
    author    = {A. Milioto and C. Stachniss},
    title     = {{Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs}},
    booktitle = icra,
    year      = {2019},
    codeurl   = {https://github.com/Photogrammetry-Robotics-Bonn/bonnet},
    videourl  = {https://www.youtube.com/watch?v=tfeFHCq6YJs},
    }

  • A. Milioto, L. Mandtler, and C. Stachniss, “Fast Instance and Semantic Segmentation Exploiting Local Connectivity, Metric Learning, and One-Shot Detection for Robotics,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @InProceedings{milioto2019icra-fiass,
    author = {A. Milioto and L. Mandtler and C. Stachniss},
    title = {{Fast Instance and Semantic Segmentation Exploiting Local Connectivity, Metric Learning, and One-Shot Detection for Robotics}},
    booktitle = icra,
    year = 2019,
    }

  • L. Nardi and C. Stachniss, “Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Video]
    @InProceedings{nardi2019icra-uapp,
    author = {L. Nardi and C. Stachniss},
    title = {{Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs}},
    booktitle = icra,
    year = 2019,
    url = {https://www.ipb.uni-bonn.de/pdfs/nardi2019icra-uapp.pdf},
    videourl = {https://youtu.be/3PMSamgYzi4},
    }

  • L. Nardi and C. Stachniss, “Actively Improving Robot Navigation On Different Terrains Using Gaussian Process Mixture Models,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Video]
    @inproceedings{nardi2019icra-airn,
    author    = {L. Nardi and C. Stachniss},
    title     = {{Actively Improving Robot Navigation On Different Terrains Using Gaussian Process Mixture Models}},
    booktitle = icra,
    year      = {2019},
    url       = {https://www.ipb.uni-bonn.de/pdfs/nardi2019icra-airn.pdf},
    videourl  = {https://youtu.be/DlMbP3u1g2Y},
    }

  • D. Wilbers, C. Merfels, and C. Stachniss, “Localization with Sliding Window Factor Graphs on Third-Party Maps for Automated Driving,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @inproceedings{wilbers2019icra,
    author    = {D. Wilbers and Ch. Merfels and C. Stachniss},
    title     = {{Localization with Sliding Window Factor Graphs on Third-Party Maps for Automated Driving}},
    booktitle = icra,
    year      = {2019},
    }

  • N. Chebrolu, P. Lottes, T. Laebe, and C. Stachniss, “Robot Localization Based on Aerial Images for Precision Agriculture Tasks in Crop Fields,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF] [Video]
    @InProceedings{chebrolu2019icra,
    author = {N. Chebrolu and P. Lottes and T. L\"abe and C. Stachniss},
    title = {{Robot Localization Based on Aerial Images for Precision Agriculture Tasks in Crop Fields}},
    booktitle = icra,
    year = 2019,
    url = {https://www.ipb.uni-bonn.de/pdfs/chebrolu2019icra.pdf},
    videourl = {https://youtu.be/TlijLgoRLbc},
    }

  • K. Huang, J. Xiao, and C. Stachniss, “Accurate Direct Visual-Laser Odometry with Explicit Occlusion Handling and Plane Detection,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @inproceedings{huang2019icra,
    author    = {K. Huang and J. Xiao and C. Stachniss},
    title     = {{Accurate Direct Visual-Laser Odometry with Explicit Occlusion Handling and Plane Detection}},
    booktitle = icra,
    year      = {2019},
    }

  • R. Schirmer, P. Bieber, and C. Stachniss, “Coverage Path Planning in Belief Space,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2019.
    [BibTeX] [PDF]
    @InProceedings{schirmer2019icra,
    author = {R. Schirmer and P. Bieber and C. Stachniss},
    title = {{Coverage Path Planning in Belief Space}},
    booktitle = icra,
    year = 2019,
    }

  • D. Wilbers, L. Rumberg, and C. Stachniss, “Approximating Marginalization with Sparse Global Priors for Sliding Window SLAM-Graphs,” in Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC), 2019.
    [BibTeX] [PDF]

    Most autonomous vehicles rely on some kind of map for localization or navigation. Outdated maps however are a risk to the performance of any map-based localization system applied in autonomous vehicles. It is necessary to update the used maps to ensure stable and long-term operation. We address the problem of computing landmark updates live in the vehicle, which requires efficient use of the computational resources. In particular, we employ a graph-based sliding window approach for simultaneous localization and incremental map refinement. We propose a novel method that approximates sliding window marginalization without inducing fill-in. Our method maintains the exact same sparsity pattern as without performing marginalization, but simultaneously improves the landmark estimates. The main novelty of this work is the derivation of sparse global priors that approximate dense marginalization. In comparison to state-of-the-art work, our approach utilizes global instead of local linearization points, but still minimizes linearization errors. We first approximate marginalization via Kullback-Leibler divergence and then recalculate the mean to compensate linearization errors. We evaluate our approach on simulated and real data from a prototype vehicle and compare our approach to state-of-the-art sliding window marginalization.

    @inproceedings{wilbers2019irc-amws,
    author    = {D. Wilbers and L. Rumberg and C. Stachniss},
    title     = {{Approximating Marginalization with Sparse Global Priors for Sliding Window SLAM-Graphs}},
    booktitle = {Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC)},
    year      = {2019},
    abstract  = {Most autonomous vehicles rely on some kind of map for localization or navigation. Outdated maps however are a risk to the performance of any map-based localization system applied in autonomous vehicles. It is necessary to update the used maps to ensure stable and long-term operation. We address the problem of computing landmark updates live in the vehicle, which requires efficient use of the computational resources. In particular, we employ a graph-based sliding window approach for simultaneous localization and incremental map refinement. We propose a novel method that approximates sliding window marginalization without inducing fill-in. Our method maintains the exact same sparsity pattern as without performing marginalization, but simultaneously improves the landmark estimates. The main novelty of this work is the derivation of sparse global priors that approximate dense marginalization. In comparison to state-of-the-art work, our approach utilizes global instead of local linearization points, but still minimizes linearization errors. We first approximate marginalization via Kullback-Leibler divergence and then recalculate the mean to compensate linearization errors. We evaluate our approach on simulated and real data from a prototype vehicle and compare our approach to state-of-the-art sliding window marginalization.},
    }

  • D. Wilbers, C. Merfels, and C. Stachniss, “A Comparison of Particle Filter and Graph-based Optimization for Localization with Landmarks in Automated Vehicles,” in Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC), 2019.
    [BibTeX] [PDF]
    @inproceedings{wilbers2019irc-cpfg,
    author    = {D. Wilbers and Ch. Merfels and C. Stachniss},
    title     = {{A Comparison of Particle Filter and Graph-based Optimization for Localization with Landmarks in Automated Vehicles}},
    booktitle = {Proc. of the IEEE Intl. Conf. on Robotic Computing (IRC)},
    year      = {2019},
    }

  • P. Lottes, N. Chebrolu, F. Liebisch, and C. Stachniss, “UAV-based Field Monitoring for Precision Farming,” in Proc. of the 25th Workshop für Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft, 2019.
    [BibTeX] [PDF]
    @InProceedings{lottes2019cbaws,
    author = {P. Lottes and N. Chebrolu and F. Liebisch and C. Stachniss},
    title = {{UAV-based Field Monitoring for Precision Farming}},
    booktitle = {Proc. of the 25th Workshop f\"ur Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft},
    year = {2019},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes2019cbaws.pdf},
    }

  • L. Klingbeil, E. Heinz, M. Wieland, J. Eichel, T. Läbe, and H. Kuhlmann, “On the UAV based Analysis of Slow Geomorphological Processes: A Case Study at a Solifluction Lobe in the Turtmann Valley,” in Proc. of the 4th Joint International Symposium on Deformation Monitoring (JISDM), 2019.
    [BibTeX] [PDF]
    @inproceedings{klingbeil19jisdm,
    author    = {L. Klingbeil and E. Heinz and M. Wieland and J. Eichel and T. L\"abe and H. Kuhlmann},
    title     = {On the UAV based Analysis of Slow Geomorphological Processes: A Case Study at a Solifluction Lobe in the Turtmann Valley},
    booktitle = {Proc. of the 4th Joint International Symposium on Deformation Monitoring (JISDM)},
    year      = {2019},
    url       = {https://www.ipb.uni-bonn.de/pdfs/klingbeil19jisdm.pdf},
    }

2018

  • I. Sa, M. Popovic, R. Khanna, Z. Chen, P. Lottes, F. Liebisch, J. Nieto, C. Stachniss, and R. Siegwart, “WeedMap: A Large-Scale Semantic Weed Mapping Framework Using Aerial Multispectral Imaging and Deep Neural Network for Precision Farming,” Remote Sensing, vol. 10, 2018. doi:10.3390/rs10091423
    [BibTeX] [PDF]

    The ability to automatically monitor agricultural fields is an important capability in precision farming, enabling steps towards more sustainable agriculture. Precise, high-resolution monitoring is a key prerequisite for targeted intervention and the selective application of agro-chemicals. The main goal of this paper is developing a novel crop/weed segmentation and mapping framework that processes multispectral images obtained from an unmanned aerial vehicle (UAV) using a deep neural network (DNN). Most studies on crop/weed semantic segmentation only consider single images for processing and classification. Images taken by UAVs often cover only a few hundred square meters with either color only or color and near-infrared (NIR) channels. Although a map can be generated by processing single segmented images incrementally, this requires additional complex information fusion techniques which struggle to handle high fidelity maps due to their computational costs and problems in ensuring global consistency. Moreover, computing a single large and accurate vegetation map (e.g., crop/weed) using a DNN is non-trivial due to difficulties arising from: (1) limited ground sample distances (GSDs) in high-altitude datasets, (2) sacrificed resolution resulting from downsampling high-fidelity images, and (3) multispectral image alignment. To address these issues, we adopt a stand sliding window approach that operates on only small portions of multispectral orthomosaic maps (tiles), which are channel-wise aligned and calibrated radiometrically across the entire map. We define the tile size to be the same as that of the DNN input to avoid resolution loss. Compared to our baseline model (i.e., SegNet with 3 channel RGB inputs) yielding an area under the curve (AUC) of [background=0.607, crop=0.681, weed=0.576], our proposed model with 9 input channels achieves [0.839, 0.863, 0.782]. Additionally, we provide an extensive analysis of 20 trained models, both qualitatively and quantitatively, in order to evaluate the effects of varying input channels and tunable network hyperparameters. Furthermore, we release a large sugar beet/weed aerial dataset with expertly guided annotations for further research in the fields of remote sensing, precision agriculture, and agricultural robotics.

    @Article{sa2018rs,
    author = {I. Sa and M. Popovic and R. Khanna and Z. Chen and P. Lottes and F. Liebisch and J. Nieto and C. Stachniss and R. Siegwart},
    title = {{WeedMap: A Large-Scale Semantic Weed Mapping Framework Using Aerial Multispectral Imaging and Deep Neural Network for Precision Farming}},
    journal = rs,
    year = 2018,
    volume = {10},
    number = {9},
    url = {https://www.mdpi.com/2072-4292/10/9/1423/pdf},
    doi = {10.3390/rs10091423},
    abstract = {The ability to automatically monitor agricultural fields is an important capability in precision farming, enabling steps towards more sustainable agriculture. Precise, high-resolution monitoring is a key prerequisite for targeted intervention and the selective application of agro-chemicals. The main goal of this paper is developing a novel crop/weed segmentation and mapping framework that processes multispectral images obtained from an unmanned aerial vehicle (UAV) using a deep neural network (DNN). Most studies on crop/weed semantic segmentation only consider single images for processing and classification. Images taken by UAVs often cover only a few hundred square meters with either color only or color and near-infrared (NIR) channels. Although a map can be generated by processing single segmented images incrementally, this requires additional complex information fusion techniques which struggle to handle high fidelity maps due to their computational costs and problems in ensuring global consistency. Moreover, computing a single large and accurate vegetation map (e.g., crop/weed) using a DNN is non-trivial due to difficulties arising from: (1) limited ground sample distances (GSDs) in high-altitude datasets, (2) sacrificed resolution resulting from downsampling high-fidelity images, and (3) multispectral image alignment. To address these issues, we adopt a stand sliding window approach that operates on only small portions of multispectral orthomosaic maps (tiles), which are channel-wise aligned and calibrated radiometrically across the entire map. We define the tile size to be the same as that of the DNN input to avoid resolution loss. Compared to our baseline model (i.e., SegNet with 3 channel RGB inputs) yielding an area under the curve (AUC) of [background=0.607, crop=0.681, weed=0.576], our proposed model with 9 input channels achieves [0.839, 0.863, 0.782]. Additionally, we provide an extensive analysis of 20 trained models, both qualitatively and quantitatively, in order to evaluate the effects of varying input channels and tunable network hyperparameters. Furthermore, we release a large sugar beet/weed aerial dataset with expertly guided annotations for further research in the fields of remote sensing, precision agriculture, and agricultural robotics.},
    }

  • N. Chebrolu, T. Läbe, and C. Stachniss, “Robust Long-Term Registration of UAV Images of Crop Fields for Precision Agriculture,” IEEE Robotics and Automation Letters (RA-L), vol. 3, iss. 4, pp. 3097-3104, 2018. doi:10.1109/LRA.2018.2849603
    [BibTeX] [PDF]
    @Article{chebrolu2018ral,
    author = {N. Chebrolu and T. L\"abe and C. Stachniss},
    title = {{Robust Long-Term Registration of UAV Images of Crop Fields for Precision Agriculture}},
    journal = ral,
    year = {2018},
    volume = {3},
    number = {4},
    pages = {3097-3104},
    keywords = {Agriculture;Cameras;Geometry;Monitoring;Robustness;Three-dimensional displays;Visualization;Robotics in agriculture and forestry;SLAM},
    doi = {10.1109/LRA.2018.2849603},
    url = {https://www.ipb.uni-bonn.de/pdfs/chebrolu2018ral.pdf},
    }

  • P. Lottes, J. Behley, A. Milioto, and C. Stachniss, “Fully Convolutional Networks with Sequential Information for Robust Crop and Weed Detection in Precision Farming,” IEEE Robotics and Automation Letters (RA-L), vol. 3, pp. 3097-3104, 2018. doi:10.1109/LRA.2018.2846289
    [BibTeX] [PDF] [Video]
    @Article{lottes2018ral,
    author = {P. Lottes and J. Behley and A. Milioto and C. Stachniss},
    title = {{Fully Convolutional Networks with Sequential Information for Robust Crop and Weed Detection in Precision Farming}},
    journal = ral,
    year = {2018},
    volume = {3},
    number = {4},
    pages = {3097-3104},
    doi = {10.1109/LRA.2018.2846289},
    url = {https://www.ipb.uni-bonn.de/pdfs/lottes2018ral.pdf},
    videourl = {https://www.youtube.com/watch?v=vTepw9HRLh8},
    }

  • P. Regier, A. Milioto, P. Karkowski, C. Stachniss, and M. Bennewitz, “Classifying Obstacles and Exploiting Knowledge about Classes for Efficient Humanoid Navigation,” in Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS), 2018.
    [BibTeX] [PDF]
    @inproceedings{regier2018humanoids,
    author    = {P. Regier and A. Milioto and P. Karkowski and C. Stachniss and M. Bennewitz},
    title     = {{Classifying Obstacles and Exploiting Knowledge about Classes for Efficient Humanoid Navigation}},
    booktitle = {Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS)},
    year      = {2018},
    }

  • K. H. Huang and C. Stachniss, “Joint Ego-motion Estimation Using a Laser Scanner and a Monocular Camera Through Relative Orientation Estimation and 1-DoF ICP,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2018.
    [BibTeX] [PDF] [Video]

    Pose estimation and mapping are key capabilities of most autonomous vehicles and thus a number of localization and SLAM algorithms have been developed in the past. Autonomous robots and cars are typically equipped with multiple sensors. Often, the sensor suite includes a camera and a laser range finder. In this paper, we consider the problem of incremental ego-motion estimation, using both, a monocular camera and a laser range finder jointly. We propose a new algorithm, that exploits the advantages of both sensors — the ability of cameras to determine orientations well and the ability of laser range finders to estimate the scale and to directly obtain 3D point clouds. Our approach estimates the five degree of freedom relative orientation from image pairs through feature point correspondences and formulates the remaining scale estimation as a new variant of the iterative closet point problem with only one degree of freedom. We furthermore exploit the camera information in a new way to constrain the data association between laser point clouds. The experiments presented in this paper suggest that our approach is able to accurately estimate the ego-motion of a vehicle and that we obtain more accurate frame-to-frame alignments than with one sensor modality alone.

    @InProceedings{huang2018iros,
    author = {K. H. Huang and C. Stachniss},
    title = {{Joint Ego-motion Estimation Using a Laser Scanner and a Monocular Camera Through Relative Orientation Estimation and 1-DoF ICP}},
    booktitle = iros,
    year = 2018,
    videourl = {https://www.youtube.com/watch?v=Glv0UT_KqoM},
    abstract = {Pose estimation and mapping are key capabilities of most autonomous vehicles and thus a number of localization and SLAM algorithms have been developed in the past. Autonomous robots and cars are typically equipped with multiple sensors. Often, the sensor suite includes a camera and a laser range finder. In this paper, we consider the problem of incremental ego-motion estimation, using both, a monocular camera and a laser range finder jointly. We propose a new algorithm, that exploits the advantages of both sensors---the ability of cameras to determine orientations well and the ability of laser range finders to estimate the scale and to directly obtain 3D point clouds. Our approach estimates the five degree of freedom relative orientation from image pairs through feature point correspondences and formulates the remaining scale estimation as a new variant of the iterative closet point problem with only one degree of freedom. We furthermore exploit the camera information in a new way to constrain the data association between laser point clouds. The experiments presented in this paper suggest that our approach is able to accurately estimate the ego-motion of a vehicle and that we obtain more accurate frame-to-frame alignments than with one sensor modality alone.},
    }

  • P. Lottes, J. Behley, N. Chebrolu, A. Milioto, and C. Stachniss, “Joint Stem Detection and Crop-Weed Classification for Plant-specific Treatment in Precision Farming,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2018.
    [BibTeX] [PDF] [Video]

    Applying agrochemicals is the default procedure for conventional weed control in crop production, but has negative impacts on the environment. Robots have the potential to treat every plant in the field individually and thus can reduce the required use of such chemicals. To achieve that, robots need the ability to identify crops and weeds in the field and must additionally select effective treatments. While certain types of weed can be treated mechanically, other types need to be treated by (selective) spraying. In this paper, we present an approach that provides the necessary information for effective plant-specific treatment. It outputs the stem location for weeds, which allows for mechanical treatments, and the covered area of the weed for selective spraying. Our approach uses an end-to- end trainable fully convolutional network that simultaneously estimates stem positions as well as the covered area of crops and weeds. It jointly learns the class-wise stem detection and the pixel-wise semantic segmentation. Experimental evaluations on different real-world datasets show that our approach is able to reliably solve this problem. Compared to state-of-the-art approaches, our approach not only substantially improves the stem detection accuracy, i.e., distinguishing crop and weed stems, but also provides an improvement in the semantic segmentation performance.

    @inproceedings{lottes2018iros,
    author    = {P. Lottes and J. Behley and N. Chebrolu and A. Milioto and C. Stachniss},
    title     = {Joint Stem Detection and Crop-Weed Classification for Plant-specific Treatment in Precision Farming},
    booktitle = iros,
    year      = {2018},
    url       = {https://www.ipb.uni-bonn.de/pdfs/lottes18iros.pdf},
    videourl  = {https://www.youtube.com/watch?v=C9mjZxE_Sxg},
    abstract  = {Applying agrochemicals is the default procedure for conventional weed control in crop production, but has negative impacts on the environment. Robots have the potential to treat every plant in the field individually and thus can reduce the required use of such chemicals. To achieve that, robots need the ability to identify crops and weeds in the field and must additionally select effective treatments. While certain types of weed can be treated mechanically, other types need to be treated by (selective) spraying. In this paper, we present an approach that provides the necessary information for effective plant-specific treatment. It outputs the stem location for weeds, which allows for mechanical treatments, and the covered area of the weed for selective spraying. Our approach uses an end-to- end trainable fully convolutional network that simultaneously estimates stem positions as well as the covered area of crops and weeds. It jointly learns the class-wise stem detection and the pixel-wise semantic segmentation. Experimental evaluations on different real-world datasets show that our approach is able to reliably solve this problem. Compared to state-of-the-art approaches, our approach not only substantially improves the stem detection accuracy, i.e., distinguishing crop and weed stems, but also provides an improvement in the semantic segmentation performance.},
    }

  • J. Jung, C. Stachniss, S. Ju, and J. Heo, “Automated 3D volumetric reconstruction of multiple-room building interiors for as-built BIM,” Advanced Engineering Informatics, vol. 38, pp. 811-825, 2018. doi:10.1016/j.aei.2018.10.007
    [BibTeX]

    Currently, fully automated as-built modeling of building interiors using point-cloud data still remains an open challenge, due to several problems that repeatedly arise: (1) complex indoor environments containing multiple rooms; (2) time-consuming and labor-intensive noise filtering; (3) difficulties of representation of volumetric and detail-rich objects such as windows and doors. This study aimed to overcome such limitations while improving the amount of details reproduced within the model for further utilization in BIM. First, we input just the registered three-dimensional (3D) point-cloud data and segmented the point cloud into separate rooms for more effective performance of the later modeling phases for each room. For noise filtering, an offset space from the ceiling height was used to determine whether the scan points belonged to clutter or architectural components. The filtered points were projected onto a binary map in order to trace the floor-wall boundary, which was further refined through subsequent segmentation and regularization procedures. Then, the wall volumes were estimated in two ways: inside- and outside-wall-component modeling. Finally, the wall points were segmented and projected onto an inverse binary map, thereby enabling detection and modeling of the hollow areas as windows or doors. The experimental results on two real-world data sets demonstrated, through comparison with manually-generated models, the effectiveness of our approach: the calculated RMSEs of the two resulting models were 0.089m and 0.074m, respectively.

    @article{jung2018aei,
    title = {Automated {3D} volumetric reconstruction of multiple-room building interiors for as-built {BIM}},
    journal = aei,
    author = {J. Jung and C. Stachniss and S. Ju and J. Heo},
    volume = {38},
    pages = {811--825},
    year = 2018,
    issn = {1474-0346},
    doi = {10.1016/j.aei.2018.10.007},
    _weburl = {https://www.sciencedirect.com/science/article/pii/S1474034618300600},
    abstract = {Currently, fully automated as-built modeling of building interiors using point-cloud data still remains an open challenge, due to several problems that repeatedly arise: (1) complex indoor environments containing multiple rooms; (2) time-consuming and labor-intensive noise filtering; (3) difficulties of representation of volumetric and detail-rich objects such as windows and doors. This study aimed to overcome such limitations while improving the amount of details reproduced within the model for further utilization in BIM. First, we input just the registered three-dimensional (3D) point-cloud data and segmented the point cloud into separate rooms for more effective performance of the later modeling phases for each room. For noise filtering, an offset space from the ceiling height was used to determine whether the scan points belonged to clutter or architectural components. The filtered points were projected onto a binary map in order to trace the floor-wall boundary, which was further refined through subsequent segmentation and regularization procedures. Then, the wall volumes were estimated in two ways: inside- and outside-wall-component modeling. Finally, the wall points were segmented and projected onto an inverse binary map, thereby enabling detection and modeling of the hollow areas as windows or doors. The experimental results on two real-world data sets demonstrated, through comparison with manually-generated models, the effectiveness of our approach: the calculated RMSEs of the two resulting models were 0.089m and 0.074m, respectively.}
    }

  • J. Behley and C. Stachniss, “Efficient Surfel-Based SLAM using 3D Laser Range Data in Urban Environments,” in Proc. of Robotics: Science and Systems (RSS), 2018.
    [BibTeX] [PDF] [Video]
    @InProceedings{behley2018rss,
    author = {J. Behley and C. Stachniss},
    title = {Efficient Surfel-Based {SLAM} using {3D} Laser Range Data in Urban Environments},
    booktitle = rss,
    year = 2018,
    videourl = {https://www.youtube.com/watch?v=-AEX203rXkE},
    url = {https://www.roboticsproceedings.org/rss14/p16.pdf},
    }

  • T. Naseer, W. Burgard, and C. Stachniss, “Robust Visual Localization Across Seasons,” IEEE Transactions on Robotics (TRO), pp. 1-14, 2018. doi:10.1109/tro.2017.2788045
    [BibTeX] [PDF]
    @Article{naseer2018tro,
    author = {T. Naseer and W. Burgard and C. Stachniss},
    title = {Robust Visual Localization Across Seasons},
    journal = ieeetransrob,
    year = 2018,
    pages = {1--14},
    doi = {10.1109/tro.2017.2788045},
    url = {https://www.ipb.uni-bonn.de/pdfs/naseer2018tro.pdf},
    }

  • B. Della Corte, I. Bogoslavskyi, C. Stachniss, and G. Grisetti, “A General Framework for Flexible Multi-Cue Photometric Point Cloud Registration,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{della-corte2018icra,
    author    = {Della Corte, B. and I. Bogoslavskyi and C. Stachniss and G. Grisetti},
    title     = {A General Framework for Flexible Multi-Cue Photometric Point Cloud Registration},
    booktitle = icra,
    year      = 2018,
    url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/della-corte2018icra.pdf},
    codeurl   = {https://gitlab.com/srrg-software/srrg_mpr},
    videourl  = {https://www.youtube.com/watch?v=_z98guJTqfk},
    }

  • A. Milioto, P. Lottes, and C. Stachniss, “Real-time Semantic Segmentation of Crop and Weed for Precision Agriculture Robots Leveraging Background Knowledge in CNNs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF] [Video]

    Precision farming robots, which target to reduce the amount of herbicides that need to be brought out in the fields, must have the ability to identify crops and weeds in real time to trigger weeding actions. In this paper, we address the problem of CNN-based semantic segmentation of crop fields separating sugar beet plants, weeds, and background solely based on RGB data. We propose a CNN that exploits existing vegetation indexes and provides a classification in real time. Furthermore, it can be effectively re-trained to so far unseen fields with a comparably small amount of training data. We implemented and thoroughly evaluated our system on a real agricultural robot operating in different fields in Germany and Switzerland. The results show that our system generalizes well, can operate at around 20Hz, and is suitable for online operation in the fields.

    @InProceedings{milioto2018icra,
    author = {A. Milioto and P. Lottes and C. Stachniss},
    title = {Real-time Semantic Segmentation of Crop and Weed for Precision Agriculture Robots Leveraging Background Knowledge in {CNNs}},
    year = {2018},
    booktitle = icra,
    abstract = {Precision farming robots, which target to reduce the amount of herbicides that need to be brought out in the fields, must have the ability to identify crops and weeds in real time to trigger weeding actions. In this paper, we address the problem of CNN-based semantic segmentation of crop fields separating sugar beet plants, weeds, and background solely based on RGB data. We propose a CNN that exploits existing vegetation indexes and provides a classification in real time. Furthermore, it can be effectively re-trained to so far unseen fields with a comparably small amount of training data. We implemented and thoroughly evaluated our system on a real agricultural robot operating in different fields in Germany and Switzerland. The results show that our system generalizes well, can operate at around 20Hz, and is suitable for online operation in the fields.},
    url = {https://arxiv.org/abs/1709.06764},
    videourl = {https://youtu.be/DXcTkJmdWFQ},
    }

  • A. Milioto and C. Stachniss, “Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs,” ICRA Workshop on Perception, Inference, and Learning for Joint Semantic, Geometric, and Physical Understanding, 2018.
    [BibTeX] [PDF] [Code] [Video]
    @Article{milioto2018icraws,
    author = {A. Milioto and C. Stachniss},
    title = {{Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs}},
    journal = {ICRA Workshop on Perception, Inference, and Learning for Joint Semantic, Geometric, and Physical Understanding},
    eprint = {1802.08960},
    archiveprefix = {arXiv},
    primaryclass = {cs.RO},
    keywords = {Computer Science - Robotics, Computer Science - Computer Vision and Pattern Recognition},
    year = 2018,
    month = may,
    url = {https://arxiv.org/abs/1802.08960},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/bonnet},
    videourl = {https://www.youtube.com/watch?v=tfeFHCq6YJs},
    }

  • E. Palazzolo and C. Stachniss, “Effective Exploration for MAVs Based on the Expected Information Gain,” Drones, vol. 2, iss. 1, 2018. doi:10.3390/drones2010009
    [BibTeX] [PDF]

    Micro aerial vehicles (MAVs) are an excellent platform for autonomous exploration. Most MAVs rely mainly on cameras for building a map of the 3D environment. Therefore, vision-based MAVs require an efficient exploration algorithm to select viewpoints that provide informative measurements. In this paper, we propose an exploration approach that selects in real time the next-best-view that maximizes the expected information gain of new measurements. In addition, we take into account the cost of reaching a new viewpoint in terms of distance and predictability of the flight path for a human observer. Finally, our approach selects a path that reduces the risk of crashes when the expected battery life comes to an end, while still maximizing the information gain in the process. We implemented and thoroughly tested our approach and the experiments show that it offers an improved performance compared to other state-of-the-art algorithms in terms of precision of the reconstruction, execution time, and smoothness of the path.

    @Article{palazzolo2018drones,
    author = {E. Palazzolo and C. Stachniss},
    title = {{Effective Exploration for MAVs Based on the Expected Information Gain}},
    journal = {Drones},
    volume = {2},
    year = {2018},
    number = {1},
    article-number= {9},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2018drones.pdf},
    issn = {2504-446X},
    abstract = {Micro aerial vehicles (MAVs) are an excellent platform for autonomous exploration. Most MAVs rely mainly on cameras for building a map of the 3D environment. Therefore, vision-based MAVs require an efficient exploration algorithm to select viewpoints that provide informative measurements. In this paper, we propose an exploration approach that selects in real time the next-best-view that maximizes the expected information gain of new measurements. In addition, we take into account the cost of reaching a new viewpoint in terms of distance and predictability of the flight path for a human observer. Finally, our approach selects a path that reduces the risk of crashes when the expected battery life comes to an end, while still maximizing the information gain in the process. We implemented and thoroughly tested our approach and the experiments show that it offers an improved performance compared to other state-of-the-art algorithms in terms of precision of the reconstruction, execution time, and smoothness of the path.},
    doi = {10.3390/drones2010009},
    }

  • E. Palazzolo and C. Stachniss, “Fast Image-Based Geometric Change Detection Given a 3D Model,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF] [Code] [Video]
    @inproceedings{palazzolo2018icra,
    author    = {E. Palazzolo and C. Stachniss},
    title     = {{Fast Image-Based Geometric Change Detection Given a 3D Model}},
    booktitle = icra,
    year      = {2018},
    url       = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2018icra.pdf},
    codeurl   = {https://github.com/PRBonn/fast_change_detection},
    videourl  = {https://youtu.be/DEkOYf4Zzh4},
    }

  • K. H. Huang and C. Stachniss, “On Geometric Models and Their Accuracy for Extrinsic Sensor Calibration,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2018.
    [BibTeX] [PDF]
    @inproceedings{huang2018icra,
    title     = {On Geometric Models and Their Accuracy for Extrinsic Sensor Calibration},
    author    = {K.H. Huang and C. Stachniss},
    year      = 2018,
    booktitle = icra,
    url       = {https://www.ipb.uni-bonn.de/pdfs/huang2018icra.pdf},
    }

  • A. Walter, R. Khanna, P. Lottes, C. Stachniss, R. Siegwart, J. Nieto, and F. Liebisch, “Flourish – A robotic approach for automation in crop management,” in Proc. of the Intl. Conf. on Precision Agriculture (ICPA), 2018.
    [BibTeX] [PDF]

    The Flourish project aims to bridge the gap between current and desired capabilities of agricultural robots by developing an adaptable robotic solution for precision farming. Combining the aerial survey capabilities of a small autonomous multi-copter Unmanned Aerial Vehicle (UAV) with a multi-purpose agricultural Unmanned Ground Vehicle (UGV), the system will be able to survey a field from the air, perform targeted intervention on the ground, and provide detailed information for decision support, all with minimal user intervention. The system can be adapted to a wide range of farm management activities and to different crops by choosing different sensors, status indicators and ground treatment packages. The research project thereby touches a selection of topics addressed by ICPA such as sensor application in managing in-season crop variability, precision nutrient management and crop protection as well as remote sensing applications in precision agriculture and engineering technologies and advances. This contribution will introduce the Flourish consortium and concept using the results of three years of active development, testing, and measuring in field campaigns. Two key parts of the project will be shown in more detail: First, mapping of the field by drones for detection of sugar beet nitrogen status variation and weed pressure in the field and second the perception of the UGV as related to weed classification and subsequent precision weed management. The field mapping by means of an UAV will be shown for crop nitrogen status estimation and weed pressure with examples for subsequent crop management decision support. For nitrogen status, the results indicate that drones are up to the task to deliver crop nitrogen variability maps utilized for variable rate application that are of comparable quality to current on-tractor systems. The weed pressure mapping is viable as basis for the UGV showcase of precision weed management. 
For this, we show the automated image acquisition by the UGV and a subsequent plant classification with a four-step pipeline, differentiating crop from weed in real time. Advantages and disadvantages as well as future prospects of such approaches will be discussed.

    @InProceedings{walter2018icpa,
    title = {Flourish - A robotic approach for automation in crop management},
    author = {A. Walter and R. Khanna and P. Lottes and C. Stachniss and R. Siegwart and J. Nieto and F. Liebisch},
    booktitle = icpa,
    year = 2018,
    abstract = {The Flourish project aims to bridge the gap between current and desired capabilities of agricultural robots by developing an adaptable robotic solution for precision farming. Combining the aerial survey capabilities of a small autonomous multi-copter Unmanned Aerial Vehicle (UAV) with a multi-purpose agricultural Unmanned Ground Vehicle (UGV), the system will be able to survey a field from the air, perform targeted intervention on the ground, and provide detailed information for decision support, all with minimal user intervention. The system can be adapted to a wide range of farm management activities and to different crops by choosing different sensors, status indicators and ground treatment packages. The research project thereby touches a selection of topics addressed by ICPA such as sensor application in managing in-season crop variability, precision nutrient management and crop protection as well as remote sensing applications in precision agriculture and engineering technologies and advances. This contribution will introduce the Flourish consortium and concept using the results of three years of active development, testing, and measuring in field campaigns. Two key parts of the project will be shown in more detail: First, mapping of the field by drones for detection of sugar beet nitrogen status variation and weed pressure in the field and second the perception of the UGV as related to weed classification and subsequent precision weed management. The field mapping by means of an UAV will be shown for crop nitrogen status estimation and weed pressure with examples for subsequent crop management decision support. For nitrogen status, the results indicate that drones are up to the task to deliver crop nitrogen variability maps utilized for variable rate application that are of comparable quality to current on-tractor systems. The weed pressure mapping is viable as basis for the UGV showcase of precision weed management. 
For this, we show the automated image acquisition by the UGV and a subsequent plant classification with a four-step pipeline, differentiating crop from weed in real time. Advantages and disadvantages as well as future prospects of such approaches will be discussed.},
    }

  • F. Langer, L. Mandtler, A. Milioto, E. Palazzolo, and C. Stachniss, “Geometrical Stem Detection from Image Data for Precision Agriculture,” arXiv Preprint, 2018.
    [BibTeX] [PDF]
    @article{langer2018arxiv,
    author = {F. Langer and L. Mandtler and A. Milioto and E. Palazzolo and C. Stachniss},
    title = {{Geometrical Stem Detection from Image Data for Precision Agriculture}},
    journal = arxiv,
    year = 2018,
    eprint = {1812.05415v1},
    archiveprefix = {arXiv},
    primaryclass = {cs.RO},
    url = {https://arxiv.org/pdf/1812.05415v1},
    keywords = {cs.RO},
    }

  • L. Drees, R. Roscher, and S. Wenzel, “Archetypal Analysis for Sparse Representation-based Hyperspectral Sub-Pixel Quantification,” Photogrammetric Engineering & Remote Sensing, 2018.
    [BibTeX] [PDF]
    @article{drees2018arxiv,
    title   = {Archetypal Analysis for Sparse Representation-based Hyperspectral Sub-Pixel Quantification},
    author  = {Drees, L. and Roscher, R. and Wenzel, S.},
    journal = {Photogrammetric Engineering \& Remote Sensing},
    year    = {2018},
    url     = {https://arxiv.org/abs/1802.02813},
    note    = {accepted},
    }

  • K. Franz, R. Roscher, A. Milioto, S. Wenzel, and J. Kusche, “Ocean Eddy Identification and Tracking using Neural Networks,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2018.
    [BibTeX] [PDF]
    @InProceedings{franz2018ocean,
    author = {Franz, K. and Roscher, R. and Milioto, A. and Wenzel, S. and Kusche, J.},
    title = {Ocean Eddy Identification and Tracking using Neural Networks},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2018},
    note = {accepted},
    url = {https://arxiv.org/abs/1803.07436},
    }

  • L. Nardi and C. Stachniss, “Towards Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs,” in 10th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS), 2018.
    [BibTeX] [PDF] [Video]
    @inproceedings{nardi2018ppniv,
    author    = {L. Nardi and C. Stachniss},
    title     = {Towards Uncertainty-Aware Path Planning for Navigation on Road Networks Using Augmented MDPs},
    booktitle = {10th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS)},
    year      = {2018},
    videourl  = {https://youtu.be/SLp5YVplJAQ},
    }

  • I. Bogoslavskyi, “Robot Mapping and Navigation in Real-World Environments,” PhD Thesis, 2018.
    [BibTeX] [PDF]
    @PhDThesis{bogosalvskyi2018phd,
    author = {I. Bogoslavskyi},
    title = {Robot Mapping and Navigation in Real-World Environments},
    school = {Rheinische Friedrich-Wilhelms University of Bonn},
    year = 2018,
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi2018phd.pdf},
    internal-note = {citation key misspells the author name ("bogosalvskyi" vs. "bogoslavskyi"); kept unchanged so existing cite commands keep resolving},
    }

  • C. Merfels, “Sensor fusion for localization of automated vehicles,” PhD Thesis, 2018.
    [BibTeX] [PDF]
    @phdthesis{merfels2018phd,
    title  = {{Sensor fusion for localization of automated vehicles}},
    author = {C. Merfels},
    year   = 2018,
    school = {Rheinische Friedrich-Wilhelms University of Bonn},
    url    = {https://hss.ulb.uni-bonn.de/2018/5276/5276.pdf},
    }

2017

  • C. Beekmans, J. Schneider, T. Läbe, M. Lennefer, C. Stachniss, and C. Simmer, “3D-Cloud Morphology and Motion from Dense Stereo for Fisheye Cameras,” in Proc. of the European Geosciences Union General Assembly (EGU), 2017.
    [BibTeX] [PDF]
    @InProceedings{beekmans2017egu,
    title = {{3D}-Cloud Morphology and Motion from Dense Stereo for Fisheye Cameras},
    author = {Ch. Beekmans and J. Schneider and T. L{\"a}be and M. Lennefer and C. Stachniss and C. Simmer},
    booktitle = {Proc. of the European Geosciences Union General Assembly (EGU)},
    year = {2017},
    }

  • I. Bogoslavskyi and C. Stachniss, “Analyzing the Quality of Matched 3D Point Clouds of Objects,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    [none]
    @InProceedings{bogoslavskyi2017iros,
    title = {Analyzing the Quality of Matched {3D} Point Clouds of Objects},
    author = {I. Bogoslavskyi and C. Stachniss},
    booktitle = iros,
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi17iros.pdf},
    }

  • I. Bogoslavskyi and C. Stachniss, “Efficient Online Segmentation for Sparse 3D Laser Scans,” Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), vol. 85, iss. 1, pp. 41–52, 2017.
    [BibTeX] [PDF] [Code] [Video]

    The ability to extract individual objects in the scene is key for a large number of autonomous navigation systems such as mobile robots or autonomous cars. Such systems navigating in dynamic environments need to be aware of objects that may change or move. In most perception cues, a pre-segmentation of the current image or laser scan into individual objects is the first processing step before a further analysis is performed. In this paper, we present an effective method that first removes the ground from the scan and then segments the 3D data in a range image representation into different objects. A key focus of our work is a fast execution with several hundred Hertz. Our implementation has small computational demands so that it can run online on most mobile systems. We explicitly avoid the computation of the 3D point cloud and operate directly on a 2.5D range image, which enables a fast segmentation for each 3D scan. This approach can furthermore handle sparse 3D data well, which is important for scanners such as the new Velodyne VLP-16 scanner. We implemented our approach in C++ and ROS, thoroughly tested it using different 3D scanners, and will release the source code of our implementation. Our method can operate at frame rates that are substantially higher than those of the sensors while using only a single core of a mobile CPU and producing high-quality segmentation results.

    @article{bogoslavskyi2017pfg,
    title = {Efficient Online Segmentation for Sparse {3D} Laser Scans},
    author = {Bogoslavskyi, Igor and Stachniss, Cyrill},
    journal = pfg,
    year = {2017},
    pages = {41--52},
    volume = {85},
    number = {1},
    abstract = {The ability to extract individual objects in the scene is key for a large number of autonomous navigation systems such as mobile robots or autonomous cars. Such systems navigating in dynamic environments need to be aware of objects that may change or move. In most perception cues, a pre-segmentation of the current image or laser scan into individual objects is the first processing step before a further analysis is performed. In this paper, we present an effective method that first removes the ground from the scan and then segments the 3D data in a range image representation into different objects. A key focus of our work is a fast execution with several hundred Hertz. Our implementation has small computational demands so that it can run online on most mobile systems. We explicitly avoid the computation of the 3D point cloud and operate directly on a 2.5D range image, which enables a fast segmentation for each 3D scan. This approach can furthermore handle sparse 3D data well, which is important for scanners such as the new Velodyne VLP-16 scanner. We implemented our approach in C++ and ROS, thoroughly tested it using different 3D scanners, and will release the source code of our implementation. Our method can operate at frame rates that are substantially higher than those of the sensors while using only a single core of a mobile CPU and producing high-quality segmentation results.},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi16pfg.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/depth_clustering},
    videourl = {https://www.youtube.com/watch?v=6WqsOlHGTLA},
    }

  • D. Bulatov, S. Wenzel, G. Häufel, and J. Meidow, “Chain-Wise Generalization of Road Networks Using Model Selection,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2017, pp. 59–66. doi:10.5194/isprs-annals-IV-1-W1-59-2017
    [BibTeX] [PDF]

    Streets are essential entities of urban terrain and their automatized extraction from airborne sensor data is cumbersome because of a complex interplay of geometric, topological and semantic aspects. Given a binary image, representing the road class, centerlines of road segments are extracted by means of skeletonization. The focus of this paper lies in a well-reasoned representation of these segments by means of geometric primitives, such as straight line segments as well as circle and ellipse arcs. We propose the fusion of raw segments based on similarity criteria; the output of this process are the so-called chains which better match to the intuitive perception of what a street is. Further, we propose a two-step approach for chain-wise generalization. First, the chain is pre-segmented using circlePeucker and finally, model selection is used to decide whether two neighboring segments should be fused to a new geometric entity. Thereby, we consider both variance-covariance analysis of residuals and model complexity. The results on a complex data-set with many traffic roundabouts indicate the benefits of the proposed procedure.

    @InProceedings{bulatov2017isprs,
    title = {Chain-Wise Generalization of Road Networks Using Model Selection},
    author = {Bulatov, D. and Wenzel, S. and H\"aufel, G. and Meidow, J.},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2017},
    pages = {59--66},
    volume = {IV-1/W1},
    abstract = {Streets are essential entities of urban terrain and their automatized extraction from airborne sensor data is cumbersome because of a complex interplay of geometric, topological and semantic aspects. Given a binary image, representing the road class, centerlines of road segments are extracted by means of skeletonization. The focus of this paper lies in a well-reasoned representation of these segments by means of geometric primitives, such as straight line segments as well as circle and ellipse arcs. We propose the fusion of raw segments based on similarity criteria; the output of this process are the so-called chains which better match to the intuitive perception of what a street is. Further, we propose a two-step approach for chain-wise generalization. First, the chain is pre-segmented using circlePeucker and finally, model selection is used to decide whether two neighboring segments should be fused to a new geometric entity. Thereby, we consider both variance-covariance analysis of residuals and model complexity. The results on a complex data-set with many traffic roundabouts indicate the benefits of the proposed procedure.},
    date = {2017-06-08},
    doi = {10.5194/isprs-annals-IV-1-W1-59-2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/Bulaton2017Chain-Wise.pdf},
    }

  • W. Förstner, Some Comments on the Relations of Photogrammetry and Industry, 2017.
    [BibTeX] [PDF]
    @unpublished{foerstner2017misc,
    author = {W. F{\"o}rstner},
    title  = {{Some Comments on the Relations of Photogrammetry and Industry}},
    year   = {2017},
    note   = {Note for Photogrammetric Record},
    url    = {https://www.ipb.uni-bonn.de/pdfs/foerstner17comments.pdf},
    owner  = {wf},
    }

  • W. Förstner and K. Khoshelham, “Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences,” in 3rd International Workshop on Recovering 6D Object Pose, 2017.
    [BibTeX] [PDF]

    We propose and analyse methods to efficiently register point clouds based on plane correspondences. This is relevant in man-made environments, where most objects are bounded by planar surfaces. Based on a segmentation of the point clouds into planar regions and matches of planes in different point clouds, we (1) optimally estimate the relative pose(s); (2) provide three direct solutions, of which two take the uncertainty of the given planes into account; and (3) analyse the loss in accuracy of the direct solutions as compared to the optimal solution. The paper presents the different solutions, derives their uncertainty especially of the suboptimal direct solutions, and compares their accuracy based on simulated and real data. We show that the direct methods that exploit the uncertainty of the planes lead to a maximum loss of 2.76 in accuracy of the estimated motion parameters in terms of the achieved standard deviations compared to the optimal estimates. We also show that the results are more accurate than the classical iterative closest point and iterative closest plane method, but the estimation procedures have a significantly lower computational complexity. We finally show how to generalize the estimation scheme to simultaneously register multiple point clouds.

    @inproceedings{foerstner2017ws,
    title     = {{Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences}},
    author    = {Wolfgang F{\"o}rstner and Kourosh Khoshelham},
    booktitle = {3rd International Workshop on Recovering 6D Object Pose},
    year      = {2017},
    url       = {https://www.ipb.uni-bonn.de/pdfs/foerstner17efficient.pdf},
    abstract  = {We propose and analyse methods to efficiently register point clouds based on plane correspondences. This is relevant in man-made environments, where most objects are bounded by planar surfaces. Based on a segmentation of the point clouds into planar regions and matches of planes in different point clouds, we (1) optimally estimate the relative pose(s); (2) provide three direct solutions, of which two take the uncertainty of the given planes into account; and (3) analyse the loss in accuracy of the direct solutions as compared to the optimal solution. The paper presents the different solutions, derives their uncertainty especially of the suboptimal direct solutions, and compares their accuracy based on simulated and real data. We show that the direct methods that exploit the uncertainty of the planes lead to a maximum loss of 2.76 in accuracy of the estimated motion parameters in terms of the achieved standard deviations compared to the optimal estimates. We also show that the results are more accurate than the classical iterative closest point and iterative closest plane method, but the estimation procedures have a significantly lower computational complexity. We finally show how to generalize the estimation scheme to simultaneously register multiple point clouds.},
    }

  • W. Förstner and K. Khoshelham, Supplement to: Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences, 2017.
    [BibTeX] [PDF]
    @Unpublished{foerstner2017supp,
    title = {{Supplement to: Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences}},
    author = {Wolfgang F{\"o}rstner and Kourosh Khoshelham},
    year = {2017},
    note = {Unpublished supplementary material to \cite{foerstner2017ws}},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner17efficient_supp.pdf},
    }

  • A. Kicherer, K. Herzog, N. Bendel, H. Klück, A. Backhaus, M. Wieland, J. C. Rose, L. Klingbeil, T. Läbe, C. Hohl, W. Petry, H. Kuhlmann, U. Seiffert, and R. Töpfer, “Phenoliner: A New Field Phenotyping Platform for Grapevine Research,” Sensors, vol. 17, iss. 7, 2017. doi:10.3390/s17071625
    [BibTeX] [PDF]

    In grapevine research the acquisition of phenotypic data is largely restricted to the field due to its perennial nature and size. The methodologies used to assess morphological traits and phenology are mainly limited to visual scoring. Some measurements for biotic and abiotic stress, as well as for quality assessments, are done by invasive measures. The new evolving sensor technologies provide the opportunity to perform non-destructive evaluations of phenotypic traits using different field phenotyping platforms. One of the biggest technical challenges for field phenotyping of grapevines are the varying light conditions and the background. In the present study the Phenoliner is presented, which represents a novel type of a robust field phenotyping platform. The vehicle is based on a grape harvester following the concept of a moveable tunnel. The tunnel it is equipped with different sensor systems (RGB and NIR camera system, hyperspectral camera, RTK-GPS, orientation sensor) and an artificial broadband light source. It is independent from external light conditions and in combination with artificial background, the Phenoliner enables standardised acquisition of high-quality, geo-referenced sensor data.

    @Article{kicherer2017phenoliner,
    author = {Kicherer, Anna and Herzog, Katja and Bendel, Nele and Kl{\"u}ck, Hans-Christian and Backhaus, Andreas and Wieland, Markus and Rose, Johann Christian and Klingbeil, Lasse and L{\"a}be, Thomas and Hohl, Christian and Petry, Willi and Kuhlmann, Heiner and Seiffert, Udo and T{\"o}pfer, Reinhard},
    title = {{Phenoliner}: A New Field Phenotyping Platform for Grapevine Research},
    journal = {Sensors},
    volume = {17},
    year = {2017},
    number = {7},
    pages = {1625},
    url = {https://www.mdpi.com/1424-8220/17/7/1625/pdf},
    issn = {1424-8220},
    abstract = {In grapevine research the acquisition of phenotypic data is largely restricted to the field due to its perennial nature and size. The methodologies used to assess morphological traits and phenology are mainly limited to visual scoring. Some measurements for biotic and abiotic stress, as well as for quality assessments, are done by invasive measures. The new evolving sensor technologies provide the opportunity to perform non-destructive evaluations of phenotypic traits using different field phenotyping platforms. One of the biggest technical challenges for field phenotyping of grapevines are the varying light conditions and the background. In the present study the Phenoliner is presented, which represents a novel type of a robust field phenotyping platform. The vehicle is based on a grape harvester following the concept of a moveable tunnel. The tunnel is equipped with different sensor systems (RGB and NIR camera system, hyperspectral camera, RTK-GPS, orientation sensor) and an artificial broadband light source. It is independent from external light conditions and in combination with artificial background, the Phenoliner enables standardised acquisition of high-quality, geo-referenced sensor data.},
    doi = {10.3390/s17071625},
    }

  • F. Liebisch, M. Popovic, J. Pfeifer, R. Khanna, P. Lottes, C. Stachniss, A. Pretto, I. S. Kyu, J. Nieto, R. Siegwart, and A. Walter, “Automatic UAV-based field inspection campaigns for weeding in row crops,” in Proc. of the 10th EARSeL SIG Imaging Spectroscopy Workshop, 2017.
    [BibTeX]
    @InProceedings{liebisch2017earsel,
    title = {Automatic {UAV}-based field inspection campaigns for weeding in row crops},
    author = {F. Liebisch and M. Popovic and J. Pfeifer and R. Khanna and P. Lottes and C. Stachniss and A. Pretto and I. Sa and J. Nieto and R. Siegwart and A. Walter},
    booktitle = {Proc. of the 10th EARSeL SIG Imaging Spectroscopy Workshop},
    year = {2017},
    }

  • P. Lottes, M. Höferlin, S. Sander, and C. Stachniss, “Effective Vision-based Classification for Separating Sugar Beets and Weeds for Precision Farming,” Journal of Field Robotics, vol. 34, pp. 1160-1178, 2017. doi:10.1002/rob.21675
    [BibTeX] [PDF]
    @Article{lottes2017jfr,
    title = {Effective Vision-based Classification for Separating Sugar Beets and Weeds for Precision Farming},
    author = {Lottes, Philipp and H{\"o}ferlin, Markus and Sander, Slawomir and Stachniss, Cyrill},
    journal = {Journal of Field Robotics},
    year = {2017},
    volume = {34},
    number = {6},
    pages = {1160--1178},
    doi = {10.1002/rob.21675},
    issn = {1556-4967},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes16jfr.pdf},
    }

  • N. Chebrolu, P. Lottes, A. Schaefer, W. Winterhalter, W. Burgard, and C. Stachniss, “Agricultural robot dataset for plant classification, localization and mapping on sugar beet fields,” Intl. Journal of Robotics Research (IJRR), 2017. doi:10.1177/0278364917720510
    [BibTeX] [PDF]
    @article{chebrolu2017ijrr,
      author  = {N. Chebrolu and P. Lottes and A. Schaefer and W. Winterhalter and W. Burgard and C. Stachniss},
      title   = {Agricultural robot dataset for plant classification, localization and mapping on sugar beet fields},
      journal = ijrr,
      year    = {2017},
      doi     = {10.1177/0278364917720510},
      url     = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/chebrolu2017ijrr.pdf},
    }

  • P. Lottes, R. Khanna, J. Pfeifer, R. Siegwart, and C. Stachniss, “UAV-Based Crop and Weed Classification for Smart Farming,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2017.
    [BibTeX] [PDF]
    @inproceedings{lottes2017icra,
      author    = {P. Lottes and R. Khanna and J. Pfeifer and R. Siegwart and C. Stachniss},
      title     = {UAV-Based Crop and Weed Classification for Smart Farming},
      booktitle = icra,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes17icra.pdf},
    }

  • P. Lottes and C. Stachniss, “Semi-Supervised Online Visual Crop and Weed Classification in Precision Farming Exploiting Plant Arrangement,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    @inproceedings{lottes2017iros,
      author    = {P. Lottes and C. Stachniss},
      title     = {Semi-Supervised Online Visual Crop and Weed Classification in Precision Farming Exploiting Plant Arrangement},
      booktitle = iros,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes17iros.pdf},
    }

  • C. Merfels and C. Stachniss, “Sensor Fusion for Self-Localisation of Automated Vehicles,” Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), 2017.
    [BibTeX] [PDF]
    @article{merfels2017pfg,
      author  = {Merfels, C. and Stachniss, C.},
      title   = {Sensor Fusion for Self-Localisation of Automated Vehicles},
      journal = pfg,
      year    = {2017},
      url     = {https://link.springer.com/article/10.1007/s41064-017-0008-1},
    }

  • A. Milioto, P. Lottes, and C. Stachniss, “Real-time Blob-wise Sugar Beets vs Weeds Classification for Monitoring Fields using Convolutional Neural Networks,” in Proc. of the Intl. Conf. on Unmanned Aerial Vehicles in Geomatics, 2017.
    [BibTeX] [PDF]

    UAVs are becoming an important tool for field monitoring and precision farming. A prerequisite for observing and analyzing fields is the ability to identify crops and weeds from image data. In this paper, we address the problem of detecting the sugar beet plants and weeds in the field based solely on image data. We propose a system that combines vegetation detection and deep learning to obtain a high-quality classification of the vegetation in the field into value crops and weeds. We implemented and thoroughly evaluated our system on image data collected from different sugar beet fields and illustrate that our approach allows for accurately identifying the weeds on the field.

    @inproceedings{milioto2017uavg,
      author    = {A. Milioto and P. Lottes and C. Stachniss},
      title     = {Real-time Blob-wise Sugar Beets vs Weeds Classification for Monitoring Fields using Convolutional Neural Networks},
      booktitle = uavg,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/pdfs/milioto17uavg.pdf},
      abstract  = {UAVs are becoming an important tool for field monitoring and precision farming. A prerequisite for observing and analyzing fields is the ability to identify crops and weeds from image data. In this paper, we address the problem of detecting the sugar beet plants and weeds in the field based solely on image data. We propose a system that combines vegetation detection and deep learning to obtain a high-quality classification of the vegetation in the field into value crops and weeds. We implemented and thoroughly evaluated our system on image data collected from different sugar beet fields and illustrate that our approach allows for accurately identifying the weeds on the field.},
    }

  • L. Nardi and C. Stachniss, “User Preferred Behaviors for Robot Navigation Exploiting Previous Experiences,” in Journal on Robotics and Autonomous Systems (RAS), 2017. doi:10.1016/j.robot.2017.08.014
    [BibTeX] [PDF]

    Industry demands flexible robots that are able to accomplish different tasks at different locations such as navigation and mobile manipulation. Operators often require mobile robots operating on factory floors to follow definite and predictable behaviors. This becomes particularly important when a robot shares the workspace with other moving entities. In this paper, we present a system for robot navigation that exploits previous experiences to generate predictable behaviors that meet user’s preferences. Preferences are not explicitly formulated but implicitly extracted from robot experiences and automatically considered to plan paths for the successive tasks without requiring experts to hard-code rules or strategies. Our system aims at accomplishing navigation behaviors that follow user’s preferences also to avoid dynamic obstacles. We achieve this by considering a probabilistic approach for modeling uncertain trajectories of the moving entities that share the workspace with the robot. We implemented and thoroughly tested our system both in simulation and on a real mobile robot. The extensive experiments presented in this paper demonstrate that our approach allows a robot for successfully navigating while performing predictable behaviors and meeting user’s preferences

    @Article{nardi2017jras,
    title = {User Preferred Behaviors for Robot Navigation Exploiting Previous Experiences},
    author = {L. Nardi and C. Stachniss},
    journal = jras,
    year = {2017},
    doi = {10.1016/j.robot.2017.08.014},
    abstract = {Industry demands flexible robots that are able to accomplish different tasks at different locations such as navigation and mobile manipulation. Operators often require mobile robots operating on factory floors to follow definite and predictable behaviors. This becomes particularly important when a robot shares the workspace with other moving entities. In this paper, we present a system for robot navigation that exploits previous experiences to generate predictable behaviors that meet user’s preferences. Preferences are not explicitly formulated but implicitly extracted from robot experiences and automatically considered to plan paths for the successive tasks without requiring experts to hard-code rules or strategies. Our system aims at accomplishing navigation behaviors that follow user’s preferences also to avoid dynamic obstacles. We achieve this by considering a probabilistic approach for modeling uncertain trajectories of the moving entities that share the workspace with the robot. We implemented and thoroughly tested our system both in simulation and on a real mobile robot. The extensive experiments presented in this paper demonstrate that our approach allows a robot for successfully navigating while performing predictable behaviors and meeting user’s preferences},
    url = {https://www.ipb.uni-bonn.de/pdfs/nardi17jras.pdf},
    }

  • E. Palazzolo and C. Stachniss, “Information-Driven Autonomous Exploration for a Vision-Based MAV,” in ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, 2017.
    [BibTeX] [PDF]
    @inproceedings{palazzolo2017uavg,
      author    = {E. Palazzolo and C. Stachniss},
      title     = {Information-Driven Autonomous Exploration for a Vision-Based MAV},
      booktitle = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2017uavg.pdf},
    }

  • E. Palazzolo and C. Stachniss, “Change Detection in 3D Models Based on Camera Images,” in 9th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    @InProceedings{palazzolo2017irosws,
    title = {Change Detection in 3D Models Based on Camera Images},
    author = {E. Palazzolo and C. Stachniss},
    booktitle = {9th Workshop on Planning, Perception and Navigation for Intelligent Vehicles at the IEEE/RSJ Int. Conf. on Intelligent Robots and Systems (IROS)},
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/palazzolo2017irosws.pdf},
    }

  • J. Schneider, C. Stachniss, and W. Förstner, “On the Quality and Efficiency of Approximate Solutions to Bundle Adjustment with Epipolar and Trifocal Constraints,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2017, pp. 81-88. doi:10.5194/isprs-annals-IV-2-W3-81-2017
    [BibTeX] [PDF]

    Bundle adjustment is a central part of most visual SLAM and Structure from Motion systems and thus a relevant component of UAVs equipped with cameras. This paper makes two contributions to bundle adjustment. First, we present a novel approach which exploits trifocal constraints, i.e., constraints resulting from corresponding points observed in three camera images, which allows to estimate the camera pose parameters without 3D point estimation. Second, we analyze the quality loss compared to the optimal bundle adjustment solution when applying different types of approximations to the constrained optimization problem to increase efficiency. We implemented and thoroughly evaluated our approach using a UAV performing mapping tasks in outdoor environments. Our results indicate that the complexity of the constraint bundle adjustment can be decreased without losing too much accuracy.

    @InProceedings{schneider2017uavg,
    title = {On the Quality and Efficiency of Approximate Solutions to Bundle Adjustment with Epipolar and Trifocal Constraints},
    author = {J. Schneider and C. Stachniss and W. F\"orstner},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2017},
    volume = {IV-2/W3},
    pages = {81--88},
    abstract = {Bundle adjustment is a central part of most visual SLAM and Structure from Motion systems and thus a relevant component of UAVs equipped with cameras. This paper makes two contributions to bundle adjustment. First, we present a novel approach which exploits trifocal constraints, i.e., constraints resulting from corresponding points observed in three camera images, which allows to estimate the camera pose parameters without 3D point estimation. Second, we analyze the quality loss compared to the optimal bundle adjustment solution when applying different types of approximations to the constrained optimization problem to increase efficiency. We implemented and thoroughly evaluated our approach using a UAV performing mapping tasks in outdoor environments. Our results indicate that the complexity of the constraint bundle adjustment can be decreased without losing too much accuracy.},
    doi = {10.5194/isprs-annals-IV-2-W3-81-2017},
    url = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/IV-2-W3/81/2017/isprs-annals-IV-2-W3-81-2017.pdf},
    }

  • O. Vysotska and C. Stachniss, “Improving SLAM by Exploiting Building Information from Publicly Available Maps and Localization Priors,” Journal of Photogrammetry, Remote Sensing and Geoinformation Science (PFG), vol. 85, iss. 1, pp. 53-65, 2017.
    [BibTeX] [PDF] [Video]
    @Article{vysotska2017pfg,
    title = {Improving SLAM by Exploiting Building Information from Publicly Available Maps and Localization Priors},
    author = {Vysotska, O. and Stachniss, C.},
    journal = pfg,
    year = {2017},
    volume = {85},
    number = {1},
    pages = {53--65},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska2016pfg.pdf},
    videourl = {https://www.youtube.com/watch?v=dKHlF3OkEV4},
    }

  • O. Vysotska and C. Stachniss, “Relocalization under Substantial Appearance Changes using Hashing,” in IROS Workshop on Planning, Perception and Navigation for Intelligent Vehicles, 2017.
    [BibTeX] [PDF] [Code]
    [none]
    @InProceedings{vysotska2017irosws,
    title = {Relocalization under Substantial Appearance Changes using Hashing},
    author = {O. Vysotska and C. Stachniss},
    booktitle = {IROS Workshop on Planning, Perception and Navigation for Intelligent Vehicles},
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska2017irosws.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/vpr_relocalization},
    }

  • J. Jung, C. Stachniss, and C. Kim, “Automatic room segmentation of 3D laser data using morphological processing,” ISPRS International Journal of Geo-Information, 2017.
    [BibTeX] [PDF]
    @Article{jung2017ijgi,
    author = {J. Jung and C. Stachniss and C. Kim},
    title = {Automatic room segmentation of {3D} laser data using morphological processing},
    journal = {ISPRS International Journal of Geo-Information},
    year = {2017},
    volume = {6},
    number = {7},
    pages = {206},
    url = {https://www.mdpi.com/2220-9964/6/7/206},
    }

  • R. Schirmer, P. Biber, and C. Stachniss, “Efficient Path Planning in Belief Space for Safe Navigation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]

    Robotic lawn-mowers are required to stay within a predefined working area, otherwise they may drive into a pond or on the street. This turns navigation and path planning into safety critical components. If we consider using SLAM techniques in that context, we must be able to provide safety guarantees in the presence of sensor/actuator noise and featureless areas in the environment. In this paper, we tackle the problem of planning a path that maximizes robot safety while navigating inside the working area and under the constraints of limited computing resources and cheap sensors. Our approach uses a map of the environment to estimate localizability at all locations, and it uses these estimates to search for a path from start to goal in belief space using an extended heuristic search algorithm. We implemented our approach using C++ and ROS and thoroughly tested it on simulation data recorded on eight different gardens, as well as on a real robot. The experiments presented in this paper show that our approach leads to short computation times and short paths while maximizing robot safety under certain assumptions.

    @inproceedings{schirmer2017iros,
      author    = {R. Schirmer and P. Biber and C. Stachniss},
      title     = {Efficient Path Planning in Belief Space for Safe Navigation},
      booktitle = iros,
      year      = {2017},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schirmer17iros.pdf},
      abstract  = {Robotic lawn-mowers are required to stay within a predefined working area, otherwise they may drive into a pond or on the street. This turns navigation and path planning into safety critical components. If we consider using SLAM techniques in that context, we must be able to provide safety guarantees in the presence of sensor/actuator noise and featureless areas in the environment. In this paper, we tackle the problem of planning a path that maximizes robot safety while navigating inside the working area and under the constraints of limited computing resources and cheap sensors. Our approach uses a map of the environment to estimate localizability at all locations, and it uses these estimates to search for a path from start to goal in belief space using an extended heuristic search algorithm. We implemented our approach using C++ and ROS and thoroughly tested it on simulation data recorded on eight different gardens, as well as on a real robot. The experiments presented in this paper show that our approach leads to short computation times and short paths while maximizing robot safety under certain assumptions.},
    }

  • K. H. Huang and C. Stachniss, “Extrinsic Multi-Sensor Calibration For Mobile Robots Using the Gauss-Helmert Model,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2017.
    [BibTeX] [PDF]
    @InProceedings{huang2017iros,
    author = {K. H. Huang and C. Stachniss},
    title = {Extrinsic Multi-Sensor Calibration For Mobile Robots Using the Gauss-Helmert Model},
    booktitle = iros,
    year = {2017},
    url = {https://www.ipb.uni-bonn.de/pdfs/huang2017iros.pdf},
    }

  • A. Bettge, R. Roscher, and S. Wenzel, “Deep self-taught learning for remote sensing image classification,” in Proc. Conf. on Big Data from Space, 2017. doi:10.2760/383579
    [BibTeX] [PDF]

    This paper addresses the land cover classification task for remote sensing images by deep self-taught learning. Our self-taught learning approach learns suitable feature representations of the input data using sparse representation and undercomplete dictionary learning. We propose a deep learning framework which extracts representations in multiple layers and use the output of the deepest layer as input to a classification algorithm. We evaluate our approach using a multispectral Landsat 5 TM image of a study area in the North of Novo Progresso (South America) and the Zurich Summer Data Set provided by the University of Zurich. Experiments indicate that features learned by a deep self-taught learning framework can be used for classification and improve the results compared to classification results using the original feature representation.

    @inproceedings{bettge2017bids,
      author    = {Bettge, A. and Roscher, R. and Wenzel, S.},
      title     = {Deep self-taught learning for remote sensing image classification},
      booktitle = {Proc. Conf. on Big Data from Space},
      year      = {2017},
      doi       = {10.2760/383579},
      url       = {https://publications.jrc.ec.europa.eu/repository/bitstream/JRC108361/jrc180361_procbids17.pdf},
      abstract  = {This paper addresses the land cover classification task for remote sensing images by deep self-taught learning. Our self-taught learning approach learns suitable feature representations of the input data using sparse representation and undercomplete dictionary learning. We propose a deep learning framework which extracts representations in multiple layers and use the output of the deepest layer as input to a classification algorithm. We evaluate our approach using a multispectral Landsat 5 TM image of a study area in the North of Novo Progresso (South America) and the Zurich Summer Data Set provided by the University of Zurich. Experiments indicate that features learned by a deep self-taught learning framework can be used for classification and improve the results compared to classification results using the original feature representation.},
    }

  • A. Braakmann-Folgmann, R. Roscher, S. Wenzel, B. Uebbing, and J. Kusche, “Sea level anomaly prediction using recurrent neural networks,” in Proc. of the Conf. on Big Data from Space, 2017. doi:10.2760/383579
    [BibTeX] [PDF]

    Sea level change, one of the most dire impacts of anthropogenic global warming, will affect a large amount of the world’s population. However, sea level change is not uniform in time and space, and the skill of conventional prediction methods is limited due to the ocean’s internal variabi-lity on timescales from weeks to decades. Here we study the potential of neural network methods which have been used successfully in other applications, but rarely been applied for this task. We develop a combination of a convolutional neural network (CNN) and a recurrent neural network (RNN) to analyse both the spatial and the temporal evolution of sea level and to suggest an independent, accurate method to predict interannual sea level anomalies (SLA). We test our method for the northern and equatorial Pacific Ocean, using gridded altimeter-derived SLA data. We show that the used network designs outperform a simple regression and that adding a CNN improves the skill significantly. The predictions are stable over several years.

    @inproceedings{braakmann-folgmann2017bids,
      author    = {Braakmann-Folgmann, A. and Roscher, R. and Wenzel, S. and Uebbing, B. and Kusche, J.},
      title     = {Sea level anomaly prediction using recurrent neural networks},
      booktitle = {Proc. of the Conf. on Big Data from Space},
      year      = {2017},
      doi       = {10.2760/383579},
      url       = {https://publications.jrc.ec.europa.eu/repository/bitstream/JRC108361/jrc180361_procbids17.pdf},
      abstract  = {Sea level change, one of the most dire impacts of anthropogenic global warming, will affect a large amount of the world's population. However, sea level change is not uniform in time and space, and the skill of conventional prediction methods is limited due to the ocean's internal variabi-lity on timescales from weeks to decades. Here we study the potential of neural network methods which have been used successfully in other applications, but rarely been applied for this task. We develop a combination of a convolutional neural network (CNN) and a recurrent neural network (RNN) to analyse both the spatial and the temporal evolution of sea level and to suggest an independent, accurate method to predict interannual sea level anomalies (SLA). We test our method for the northern and equatorial Pacific Ocean, using gridded altimeter-derived SLA data. We show that the used network designs outperform a simple regression and that adding a CNN improves the skill significantly. The predictions are stable over several years.},
    }

  • R. Roscher, L. Drees, and S. Wenzel, “Sparse representation-based archetypal graphs for spectral clustering,” in IEEE International Geoscience and Remote Sensing Symposium, 2017.
    [BibTeX] [PDF]
    @InProceedings{roscher2017igrss,
    author = {Roscher, R. and Drees, L. and Wenzel, S.},
    title = {Sparse representation-based archetypal graphs for spectral clustering},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium},
    year = {2017},
    url = {https://www.researchgate.net/publication/321680475_Sparse_representation-based_archetypal_graphs_for_spectral_clustering},
    }

2016

  • N. Abdo, C. Stachniss, L. Spinello, and W. Burgard, “Organizing Objects by Predicting User Preferences Through Collaborative Filtering,” Intl. Journal of Robotics Research (IJRR), 2016.
    [BibTeX] [PDF]
    [none]
    @Article{abdo16ijrr,
    title = {Organizing Objects by Predicting User Preferences Through Collaborative Filtering},
    author = {N. Abdo and C. Stachniss and L. Spinello and W. Burgard},
    journal = ijrr,
    year = {2016},
    note = {arXiv:1512.06362},
    url = {https://arxiv.org/abs/1512.06362},
    }

  • C. Beekmans, J. Schneider, T. Läbe, M. Lennefer, C. Stachniss, and C. Simmer, “Cloud Photogrammetry with Dense Stereo for Fisheye Cameras,” Atmospheric Chemistry and Physics (ACP), vol. 16, iss. 22, pp. 14231-14248, 2016. doi:10.5194/acp-16-14231-2016
    [BibTeX] [PDF]

    We present a novel approach for dense 3-D cloud reconstruction above an area of 10 × 10 km2 using two hemispheric sky imagers with fisheye lenses in a stereo setup. We examine an epipolar rectification model designed for fisheye cameras, which allows the use of efficient out-of-the-box dense matching algorithms designed for classical pinhole-type cameras to search for correspondence information at every pixel. The resulting dense point cloud allows to recover a detailed and more complete cloud morphology compared to previous approaches that employed sparse feature-based stereo or assumed geometric constraints on the cloud field. Our approach is very efficient and can be fully automated. From the obtained 3-D shapes, cloud dynamics, size, motion, type and spacing can be derived, and used for radiation closure under cloudy conditions, for example. Fisheye lenses follow a different projection function than classical pinhole-type cameras and provide a large field of view with a single image. However, the computation of dense 3-D information is more complicated and standard implementations for dense 3-D stereo reconstruction cannot be easily applied. Together with an appropriate camera calibration, which includes internal camera geometry, global position and orientation of the stereo camera pair, we use the correspondence information from the stereo matching for dense 3-D stereo reconstruction of clouds located around the cameras. We implement and evaluate the proposed approach using real world data and present two case studies. In the first case, we validate the quality and accuracy of the method by comparing the stereo reconstruction of a stratocumulus layer with reflectivity observations measured by a cloud radar and the cloud-base height estimated from a Lidar-ceilometer. The second case analyzes a rapid cumulus evolution in the presence of strong wind shear.

    @Article{beekmans16acp,
    title = {Cloud Photogrammetry with Dense Stereo for Fisheye Cameras},
    author = {C. Beekmans and J. Schneider and T. L\"abe and M. Lennefer and C. Stachniss and C. Simmer},
    journal = {Atmospheric Chemistry and Physics (ACP)},
    year = {2016},
    volume = {16},
    number = {22},
    pages = {14231--14248},
    abstract = {We present a novel approach for dense 3-D cloud reconstruction above an area of 10 $\times$ 10 km$^2$ using two hemispheric sky imagers with fisheye lenses in a stereo setup. We examine an epipolar rectification model designed for fisheye cameras, which allows the use of efficient out-of-the-box dense matching algorithms designed for classical pinhole-type cameras to search for correspondence information at every pixel. The resulting dense point cloud allows to recover a detailed and more complete cloud morphology compared to previous approaches that employed sparse feature-based stereo or assumed geometric constraints on the cloud field. Our approach is very efficient and can be fully automated. From the obtained 3-D shapes, cloud dynamics, size, motion, type and spacing can be derived, and used for radiation closure under cloudy conditions, for example. Fisheye lenses follow a different projection function than classical pinhole-type cameras and provide a large field of view with a single image. However, the computation of dense 3-D information is more complicated and standard implementations for dense 3-D stereo reconstruction cannot be easily applied. Together with an appropriate camera calibration, which includes internal camera geometry, global position and orientation of the stereo camera pair, we use the correspondence information from the stereo matching for dense 3-D stereo reconstruction of clouds located around the cameras. We implement and evaluate the proposed approach using real world data and present two case studies. In the first case, we validate the quality and accuracy of the method by comparing the stereo reconstruction of a stratocumulus layer with reflectivity observations measured by a cloud radar and the cloud-base height estimated from a Lidar-ceilometer. The second case analyzes a rapid cumulus evolution in the presence of strong wind shear.},
    doi = {10.5194/acp-16-14231-2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/beekmans16acp.pdf},
    }

  • I. Bogoslavskyi, M. Mazuran, and C. Stachniss, “Robust Homing for Autonomous Robots,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2016.
    [BibTeX] [PDF] [Video]
    [none]
    @InProceedings{bogoslavskyi16icra,
    title = {Robust Homing for Autonomous Robots},
    author = {I. Bogoslavskyi and M. Mazuran and C. Stachniss},
    booktitle = icra,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi16icra.pdf},
    videourl = {https://www.youtube.com/watch?v=sUvDvq91Vpw},
    }

  • I. Bogoslavskyi and C. Stachniss, “Fast Range Image-Based Segmentation of Sparse 3D Laser Scans for Online Operation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016.
    [BibTeX] [PDF] [Code] [Video]
    [none]
    @InProceedings{bogoslavskyi16iros,
    title = {Fast Range Image-Based Segmentation of Sparse 3D Laser Scans for Online Operation},
    author = {I. Bogoslavskyi and C. Stachniss},
    booktitle = iros,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi16iros.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/depth_clustering},
    videourl = {https://www.youtube.com/watch?v=6WqsOlHGTLA},
    }

  • W. Förstner, “A Future for Learning Semantic Models of Man-Made Environments,” in Proc. of Int. Conf. on Pattern Recognition (ICPR), 2016.
    [BibTeX] [PDF]

    Deriving semantic 3D models of man-made environments hitherto has not reached the desired maturity which makes human interaction obsolete. Man-made environments play a central role in navigation, city planning, building management systems, disaster management or augmented reality. They are characterised by rich geometric and semantic structures. These cause conceptual problems when learning generic models or when developing automatic acquisition systems. The problems appear to be caused by (1) the incoherence of the models for signal analysis, (2) the type of interplay between discrete and continuous geometric representations, (3) the inefficiency of the interaction between crisp models, such as partonomies and taxonomies, and soft models, mostly having a probabilistic nature, and (4) the vagueness of the used notions in the envisaged application domains. The paper wants to encourage the development and learning of generative models, specifically for man-made objects, to be able to understand, reason about, and explain interpretations.

    @InProceedings{foerstner2016future,
    author    = {W. F{\"o}rstner},
    title     = {{A Future for Learning Semantic Models of Man-Made Environments}},
    booktitle = {Proc. of Int. Conf. on Pattern Recognition (ICPR)},
    year      = {2016},
    abstract  = {Deriving semantic 3D models of man-made environments hitherto has not reached the desired maturity which makes human interaction obsolete. Man-made environments play a central role in navigation, city planning, building management systems, disaster management or augmented reality. They are characterised by rich geometric and semantic structures. These cause conceptual problems when learning generic models or when developing automatic acquisition systems. The problems appear to be caused by (1) the incoherence of the models for signal analysis, (2) the type of interplay between discrete and continuous geometric representations, (3) the inefficiency of the interaction between crisp models, such as partonomies and taxonomies, and soft models, mostly having a probabilistic nature, and (4) the vagueness of the used notions in the envisaged application domains. The paper wants to encourage the development and learning of generative models, specifically for man-made objects, to be able to understand, reason about, and explain interpretations.},
    url       = {https://www.ipb.uni-bonn.de/pdfs/foerstner16Future.pdf},
    }

  • W. Förstner and B. P. Wrobel, Photogrammetric Computer Vision – Statistics, Geometry, Orientation and Reconstruction, Springer, 2016.
    [BibTeX]
    @Book{foerstner2016photogrammetric,
    author    = {W. F{\"o}rstner and B. P. Wrobel},
    title     = {{Photogrammetric Computer Vision -- Statistics, Geometry, Orientation and Reconstruction}},
    publisher = {Springer},
    year      = {2016},
    }

  • B. Franke, J. Plante, R. Roscher, A. Lee, C. Smyth, A. Hatefi, F. Chen, E. Gil, A. Schwing, A. Selvitella, M. M. Hoffman, R. Grosse, D. Hendricks, and N. Reid, “Statistical Inference, Learning and Models in Big Data,” International Statistical Review, 2016.
    [BibTeX] [PDF]

    Big data provides big opportunities for statistical inference, but perhaps even bigger challenges, often related to differences in volume, variety, velocity, and veracity of information when compared to smaller carefully collected datasets. From January to June, 2015, the Canadian Institute of Statistical Sciences organized a thematic program on Statistical Inference, Learning and Models in Big Data. This paper arose from presentations and discussions that took place during the thematic program.

    @Article{franke2016bigdata,
    title = {Statistical Inference, Learning and Models in Big Data},
    author = {Franke, Beate and Plante, Jean-Fran\c{c}ois and Roscher, Ribana and Lee, Annie and Smyth, Cathal and Hatefi, Armin and Chen, Fuqi and Gil, Einat and Schwing, Alex and Selvitella, Alessandro and Hoffman, Michael M. and Grosse, Roger and Hendricks, Dieter and Reid, Nancy},
    journal = {International Statistical Review},
    year = {2016},
    doi = {10.1111/insr.12176},
    abstract = {Big data provides big opportunities for statistical inference, but perhaps even bigger challenges, often related to differences in volume, variety, velocity, and veracity of information when compared to smaller carefully collected datasets. From January to June, 2015, the Canadian Institute of Statistical Sciences organized a thematic program on Statistical Inference, Learning and Models in Big Data. This paper arose from presentations and discussions that took place during the thematic program.},
    url = {https://onlinelibrary.wiley.com/doi/10.1111/insr.12176/full},
    }

  • M. Laîné, S. Cruciani, E. Palazzolo, N. J. Britton, X. Cavarelli, and K. Yoshida, “Navigation System for a Small Size Lunar Exploration Rover with a Monocular Omnidirectional Camera,” in Proc. SPIE, 2016. doi:10.1117/12.2242871
    [BibTeX]
    @InProceedings{laine16spie,
    author = {M. La{\^{i}}n{\'{e}} and S. Cruciani and E. Palazzolo and N. J. Britton and X. Cavarelli and K. Yoshida},
    title = {Navigation System for a Small Size Lunar Exploration Rover with a Monocular Omnidirectional Camera},
    booktitle = {Proc. SPIE},
    volume = {10011},
    year = {2016},
    doi = {10.1117/12.2242871},
    }

  • F. Liebisch, J. Pfeifer, R. Khanna, P. Lottes, C. Stachniss, T. Falck, S. Sander, R. Siegwart, A. Walter, and E. Galceran, “Flourish – A robotic approach for automation in crop management,” in Proc. of the Workshop für Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft, 2016.
    [BibTeX] [PDF]
    @InProceedings{liebisch16wslw,
    title = {Flourish -- A robotic approach for automation in crop management},
    author = {F. Liebisch and J. Pfeifer and R. Khanna and P. Lottes and C. Stachniss and T. Falck and S. Sander and R. Siegwart and A. Walter and E. Galceran},
    booktitle = {Proc. of the Workshop f{\"u}r Computer-Bildanalyse und unbemannte autonom fliegende Systeme in der Landwirtschaft},
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/liebisch16cbaws.pdf},
    }

  • C. Stachniss, J. Leonard, and S. Thrun, “Springer Handbook of Robotics, 2nd edition,” , B. Siciliano and O. Khatib, Eds., Springer, 2016.
    [BibTeX]
    @InBook{springerbook-slamchapter,
    author    = {C. Stachniss and J. Leonard and S. Thrun},
    editor    = {B. Siciliano and O. Khatib},
    title     = {Springer Handbook of Robotics, 2nd edition},
    chapter   = {Chapt.~46: Simultaneous Localization and Mapping},
    publisher = {Springer},
    year      = {2016},
    }

  • P. Lottes, M. Höferlin, S. Sander, M. Müter, P. Schulze-Lammers, and C. Stachniss, “An Effective Classification System for Separating Sugar Beets and Weeds for Precision Farming Applications,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2016.
    [BibTeX] [PDF]
    @InProceedings{lottes2016icra,
    title = {An Effective Classification System for Separating Sugar Beets and Weeds for Precision Farming Applications},
    author = {P. Lottes and M. H{\"o}ferlin and S. Sander and M. M{\"u}ter and P. Schulze-Lammers and C. Stachniss},
    booktitle = icra,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lottes16icra.pdf},
    }

  • B. Mack, R. Roscher, S. Stenzel, H. Feilhauer, S. Schmidtlein, and B. Waske, “Mapping raised bogs with an iterative one-class classification approach,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 120, pp. 53-64, 2016. doi:https://dx.doi.org/10.1016/j.isprsjprs.2016.07.008
    [BibTeX] [PDF]

    Abstract Land use and land cover maps are one of the most commonly used remote sensing products. In many applications the user only requires a map of one particular class of interest, e.g. a specific vegetation type or an invasive species. One-class classifiers are appealing alternatives to common supervised classifiers because they can be trained with labeled training data of the class of interest only. However, training an accurate one-class classification (OCC) model is challenging, particularly when facing a large image, a small class and few training samples. To tackle these problems we propose an iterative \{OCC\} approach. The presented approach uses a biased Support Vector Machine as core classifier. In an iterative pre-classification step a large part of the pixels not belonging to the class of interest is classified. The remaining data is classified by a final classifier with a novel model and threshold selection approach. The specific objective of our study is the classification of raised bogs in a study site in southeast Germany, using multi-seasonal RapidEye data and a small number of training sample. Results demonstrate that the iterative \{OCC\} outperforms other state of the art one-class classifiers and approaches for model selection. The study highlights the potential of the proposed approach for an efficient and improved mapping of small classes such as raised bogs. Overall the proposed approach constitutes a feasible approach and useful modification of a regular one-class classifier.

    @Article{mack2016raised,
    title = {Mapping raised bogs with an iterative one-class classification approach},
    author = {Mack, Benjamin and Roscher, Ribana and Stenzel, Stefanie and Feilhauer, Hannes and Schmidtlein, Sebastian and Waske, Bj{\"o}rn},
    journal = {{ISPRS} Journal of Photogrammetry and Remote Sensing},
    year = {2016},
    volume = {120},
    pages = {53--64},
    doi = {10.1016/j.isprsjprs.2016.07.008},
    issn = {0924-2716},
    keywords = {Remote sensing},
    abstract = {Land use and land cover maps are one of the most commonly used remote sensing products. In many applications the user only requires a map of one particular class of interest, e.g. a specific vegetation type or an invasive species. One-class classifiers are appealing alternatives to common supervised classifiers because they can be trained with labeled training data of the class of interest only. However, training an accurate one-class classification (OCC) model is challenging, particularly when facing a large image, a small class and few training samples. To tackle these problems we propose an iterative {OCC} approach. The presented approach uses a biased Support Vector Machine as core classifier. In an iterative pre-classification step a large part of the pixels not belonging to the class of interest is classified. The remaining data is classified by a final classifier with a novel model and threshold selection approach. The specific objective of our study is the classification of raised bogs in a study site in southeast Germany, using multi-seasonal RapidEye data and a small number of training sample. Results demonstrate that the iterative {OCC} outperforms other state of the art one-class classifiers and approaches for model selection. The study highlights the potential of the proposed approach for an efficient and improved mapping of small classes such as raised bogs. Overall the proposed approach constitutes a feasible approach and useful modification of a regular one-class classifier.},
    url = {https://www.sciencedirect.com/science/article/pii/S0924271616302180},
    }

  • C. Merfels and C. Stachniss, “Pose Fusion with Chain Pose Graphs for Automated Driving,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016.
    [BibTeX] [PDF]
    @InProceedings{merfels16iros,
    author    = {Ch. Merfels and C. Stachniss},
    title     = {Pose Fusion with Chain Pose Graphs for Automated Driving},
    booktitle = iros,
    year      = {2016},
    url       = {https://www.ipb.uni-bonn.de/pdfs/merfels16iros.pdf},
    }

  • L. Nardi and C. Stachniss, “Experience-Based Path Planning for Mobile Robots Exploiting User Preferences,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016. doi:10.1109/IROS.2016.7759197
    [BibTeX] [PDF]

    The demand for flexible industrial robotic solutions that are able to accomplish tasks at different locations in a factory is growing more and more. When deploying mobile robots in a factory environment, the predictability and reproducibility of their behaviors become important and are often requested. In this paper, we propose an easy-to-use motion planning scheme that can take into account user preferences for robot navigation. The preferences are extracted implicitly from the previous experiences or from demonstrations and are automatically considered in the subsequent planning steps. This leads to reproducible and thus better to predict navigation behaviors of the robot, without requiring experts to hard-coding control strategies or cost functions within a planner. Our system has been implemented and evaluated on a simulated KUKA mobile robot in different environments.

    @InProceedings{nardi16iros,
    author    = {L. Nardi and C. Stachniss},
    title     = {Experience-Based Path Planning for Mobile Robots Exploiting User Preferences},
    booktitle = iros,
    year      = {2016},
    abstract  = {The demand for flexible industrial robotic solutions that are able to accomplish tasks at different locations in a factory is growing more and more. When deploying mobile robots in a factory environment, the predictability and reproducibility of their behaviors become important and are often requested. In this paper, we propose an easy-to-use motion planning scheme that can take into account user preferences for robot navigation. The preferences are extracted implicitly from the previous experiences or from demonstrations and are automatically considered in the subsequent planning steps. This leads to reproducible and thus better to predict navigation behaviors of the robot, without requiring experts to hard-coding control strategies or cost functions within a planner. Our system has been implemented and evaluated on a simulated KUKA mobile robot in different environments.},
    doi       = {10.1109/IROS.2016.7759197},
    url       = {https://www.ipb.uni-bonn.de/pdfs/nardi16iros.pdf},
    }

  • S. Osswald, M. Bennewitz, W. Burgard, and C. Stachniss, “Speeding-Up Robot Exploration by Exploiting Background Information,” IEEE Robotics and Automation Letters (RA-L), 2016.
    [BibTeX] [PDF]
    @Article{osswald16ral,
    author  = {S. Osswald and M. Bennewitz and W. Burgard and C. Stachniss},
    title   = {Speeding-Up Robot Exploration by Exploiting Background Information},
    journal = ral,
    year    = {2016},
    url     = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/osswald16ral.pdf},
    }

  • D. Perea-Ström, I. Bogoslavskyi, and C. Stachniss, “Robust Exploration and Homing for Autonomous Robots,” Journal on Robotics and Autonomous Systems (RAS), 2016.
    [BibTeX] [PDF]
    @Article{perea16jras,
    title = {Robust Exploration and Homing for Autonomous Robots},
    author = {D. Perea-Str{\"o}m and I. Bogoslavskyi and C. Stachniss},
    journal = jras,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/perea16jras.pdf},
    }

  • R. Roscher, J. Behmann, A.-K. Mahlein, J. Dupuis, H. Kuhlmann, and L. Plümer, “Detection of Disease Symptoms on Hyperspectral 3D Plant Models,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2016, p. 89–96.
    [BibTeX]

    We analyze the benefit of combining hyperspectral images information with 3D geometry information for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Besides commonly used one-class Support Vector Machines, we utilize an unsupervised sparse representation-based approach with group sparsity prior. Geometry information is incorporated by representing each sample of interest with an inclination-sorted dictionary, which can be seen as an 1D topographic dictionary. We compare this approach with a sparse representation based approach without geometry information and One-Class Support Vector Machines. One-Class Support Vector Machines are applied to hyperspectral data without geometry information as well as to hyperspectral images with additional pixelwise inclination information. Our results show a gain in accuracy when using geometry information beside spectral information regardless of the used approach. However, both methods have different demands on the data when applied to new test data sets. One-Class Support Vector Machines require full inclination information on test and training data whereas the topographic dictionary approach only need spectral information for reconstruction of test data once the dictionary is build by spectra with inclination.

    @InProceedings{roscher2016detection,
    author    = {Roscher, R. and Behmann, J. and Mahlein, A.-K. and Dupuis, J. and Kuhlmann, H. and Pl{\"u}mer, L.},
    title     = {Detection of Disease Symptoms on Hyperspectral {3D} Plant Models},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year      = {2016},
    pages     = {89--96},
    abstract  = {We analyze the benefit of combining hyperspectral images information with 3D geometry information for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Besides commonly used one-class Support Vector Machines, we utilize an unsupervised sparse representation-based approach with group sparsity prior. Geometry information is incorporated by representing each sample of interest with an inclination-sorted dictionary, which can be seen as an 1D topographic dictionary. We compare this approach with a sparse representation based approach without geometry information and One-Class Support Vector Machines. One-Class Support Vector Machines are applied to hyperspectral data without geometry information as well as to hyperspectral images with additional pixelwise inclination information. Our results show a gain in accuracy when using geometry information beside spectral information regardless of the used approach. However, both methods have different demands on the data when applied to new test data sets. One-Class Support Vector Machines require full inclination information on test and training data whereas the topographic dictionary approach only need spectral information for reconstruction of test data once the dictionary is build by spectra with inclination.},
    }

  • R. Roscher, J. Behmann, A.-K. Mahlein, and L. Plümer, “On the Benefit of Topographic Dictionaries for Detecting Disease Symptoms on Hyperspectral 3D Plant Models,” in Workshop on Hyperspectral Image and Signal Processing, 2016.
    [BibTeX]

    We analyze the benefit of using topographic dictionaries for a sparse representation (SR) approach for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Topographic dictionaries are an arranged set of basis elements in which neighbored dictionary elements tend to cause similar activations in the SR approach. In this paper, the dictionary is obtained from samples of a healthy plant and partly build in a topographic way by using hyperspectral as well as geometry information, i.e. depth and inclination. It turns out that hyperspectral signals of leafs show a typical structure depending on depth and inclination and thus, both influences can be disentangled in our approach. Rare signals which do not fit into this model, e.g. leaf veins, are also captured in the dictionary in a non-topographic way. A reconstruction error index is used as indicator, in which disease symptoms can be distinguished from healthy plant regions. The advantage of the presented approach is that full spectral and geometry information is needed only once to built the dictionary, whereas the sparse reconstruction is done solely on hyperspectral information.

    @InProceedings{roscher2016topographic,
    title = {On the Benefit of Topographic Dictionaries for Detecting Disease Symptoms on Hyperspectral {3D} Plant Models},
    author = {Roscher, R. and Behmann, J. and Mahlein, A.-K. and Pl{\"u}mer, L.},
    booktitle = {Workshop on Hyperspectral Image and Signal Processing},
    year = {2016},
    abstract = {We analyze the benefit of using topographic dictionaries for a sparse representation (SR) approach for the detection of Cercospora leaf spot disease symptoms on sugar beet plants. Topographic dictionaries are an arranged set of basis elements in which neighbored dictionary elements tend to cause similar activations in the SR approach. In this paper, the dictionary is obtained from samples of a healthy plant and partly build in a topographic way by using hyperspectral as well as geometry information, i.e. depth and inclination. It turns out that hyperspectral signals of leafs show a typical structure depending on depth and inclination and thus, both influences can be disentangled in our approach. Rare signals which do not fit into this model, e.g. leaf veins, are also captured in the dictionary in a non-topographic way. A reconstruction error index is used as indicator, in which disease symptoms can be distinguished from healthy plant regions. The advantage of the presented approach is that full spectral and geometry information is needed only once to built the dictionary, whereas the sparse reconstruction is done solely on hyperspectral information.},
    }

  • R. Roscher, S. Wenzel, and B. Waske, “Discriminative Archetypal Self-taught Learning for Multispectral Landcover Classification,” in Proc. of Pattern Recognition in Remote Sensing 2016 (PRRS), Workshop at ICPR; to appear in IEEE Xplore, 2016.
    [BibTeX] [PDF]

    Self-taught learning (STL) has become a promising paradigm to exploit unlabeled data for classification. The most commonly used approach to self-taught learning is sparse representation, in which it is assumed that each sample can be represented by a weighted linear combination of elements of a unlabeled dictionary. This paper proposes discriminative archetypal self-taught learning for the application of landcover classification, in which unlabeled discriminative archetypal samples are selected to build a powerful dictionary. Our main contribution is to present an approach which utilizes reversible jump Markov chain Monte Carlo method to jointly determine the best set of archetypes and the number of elements to build the dictionary. Experiments are conducted using synthetic data, a multi-spectral Landsat 7 image of a study area in the Ukraine and the Zurich benchmark data set comprising 20 multispectral Quickbird images. Our results confirm that the proposed approach can learn discriminative features for classification and show better classification results compared to self-taught learning with the original feature representation and compared to randomly initialized archetypal dictionaries.

    @InProceedings{roscher2016discriminative,
    title = {Discriminative Archetypal Self-taught Learning for Multispectral Landcover Classification},
    author = {Roscher, R. and Wenzel, S. and Waske, B.},
    booktitle = {Proc. of Pattern Recognition in Remote Sensing 2016 (PRRS), Workshop at ICPR},
    year = {2016},
    note = {To appear in IEEE Xplore},
    abstract = {Self-taught learning (STL) has become a promising paradigm to exploit unlabeled data for classification. The most commonly used approach to self-taught learning is sparse representation, in which it is assumed that each sample can be represented by a weighted linear combination of elements of a unlabeled dictionary. This paper proposes discriminative archetypal self-taught learning for the application of landcover classification, in which unlabeled discriminative archetypal samples are selected to build a powerful dictionary. Our main contribution is to present an approach which utilizes reversible jump Markov chain Monte Carlo method to jointly determine the best set of archetypes and the number of elements to build the dictionary. Experiments are conducted using synthetic data, a multi-spectral Landsat 7 image of a study area in the Ukraine and the Zurich benchmark data set comprising 20 multispectral Quickbird images. Our results confirm that the proposed approach can learn discriminative features for classification and show better classification results compared to self-taught learning with the original feature representation and compared to randomly initialized archetypal dictionaries.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2016Discriminative.pdf},
    }

  • J. Schneider, C. Eling, L. Klingbeil, H. Kuhlmann, W. Förstner, and C. Stachniss, “Fast and Effective Online Pose Estimation and Mapping for UAVs,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2016, p. 4784–4791. doi:10.1109/ICRA.2016.7487682
    [BibTeX] [PDF]

    Online pose estimation and mapping in unknown environments is essential for most mobile robots. Especially autonomous unmanned aerial vehicles require good pose estimates at comparably high frequencies. In this paper, we propose an effective system for online pose and simultaneous map estimation designed for light-weight UAVs. Our system consists of two components: (1) real-time pose estimation combining RTK-GPS and IMU at 100 Hz and (2) an effective SLAM solution running at 10 Hz using image data from an omnidirectional multi-fisheye-camera system. The SLAM procedure combines spatial resection computed based on the map that is incrementally refined through bundle adjustment and combines the image data with raw GPS observations and IMU data on keyframes. The overall system yields a real-time, georeferenced pose at 100 Hz in GPS-friendly situations. Additionally, we obtain a precise pose and feature map at 10 Hz even in cases where the GPS is not observable or underconstrained. Our system has been implemented and thoroughly tested on a 5 kg copter and yields accurate and reliable pose estimation at high frequencies. We compare the point cloud obtained by our method with a model generated from georeferenced terrestrial laser scanner.

    @InProceedings{schneider16icra,
    author    = {J. Schneider and C. Eling and L. Klingbeil and H. Kuhlmann and W. F\"orstner and C. Stachniss},
    title     = {Fast and Effective Online Pose Estimation and Mapping for UAVs},
    booktitle = icra,
    year      = {2016},
    pages     = {4784--4791},
    abstract  = {Online pose estimation and mapping in unknown environments is essential for most mobile robots. Especially autonomous unmanned aerial vehicles require good pose estimates at comparably high frequencies. In this paper, we propose an effective system for online pose and simultaneous map estimation designed for light-weight UAVs. Our system consists of two components: (1) real-time pose estimation combining RTK-GPS and IMU at 100 Hz and (2) an effective SLAM solution running at 10 Hz using image data from an omnidirectional multi-fisheye-camera system. The SLAM procedure combines spatial resection computed based on the map that is incrementally refined through bundle adjustment and combines the image data with raw GPS observations and IMU data on keyframes. The overall system yields a real-time, georeferenced pose at 100 Hz in GPS-friendly situations. Additionally, we obtain a precise pose and feature map at 10 Hz even in cases where the GPS is not observable or underconstrained. Our system has been implemented and thoroughly tested on a 5 kg copter and yields accurate and reliable pose estimation at high frequencies. We compare the point cloud obtained by our method with a model generated from georeferenced terrestrial laser scanner.},
    doi       = {10.1109/ICRA.2016.7487682},
    url       = {https://www.ipb.uni-bonn.de/pdfs/schneider16icra.pdf},
    }

  • J. Schneider, C. Stachniss, and W. Förstner, “Dichtes Stereo mit Fisheye-Kameras,” in UAV 2016 – Vermessung mit unbemannten Flugsystemen, 2016, pp. 247-264.
    [BibTeX]
    @InProceedings{schneider16dvw,
    title = {Dichtes Stereo mit Fisheye-Kameras},
    author = {J. Schneider and C. Stachniss and W. F\"orstner},
    booktitle = {UAV 2016 -- Vermessung mit unbemannten Flugsystemen},
    year = {2016},
    pages = {247--264},
    publisher = {Wi{\ss}ner Verlag},
    series = {Schriftenreihe des DVW},
    volume = {82},
    }

  • J. Schneider, C. Stachniss, and W. Förstner, “On the Accuracy of Dense Fisheye Stereo,” IEEE Robotics and Automation Letters (RA-L), vol. 1, iss. 1, pp. 227-234, 2016. doi:10.1109/LRA.2016.2516509
    [BibTeX] [PDF]

    Fisheye cameras offer a large field of view, which is important for several robotics applications as a larger field of view allows for covering a large area with a single image. In contrast to classical cameras, however, fisheye cameras cannot be approximated well using the pinhole camera model and this renders the computation of depth information from fisheye stereo image pairs more complicated. In this work, we analyze the combination of an epipolar rectification model for fisheye stereo cameras with existing dense methods. This has the advantage that existing dense stereo systems can be applied as a black-box even with cameras that have field of view of more than 180 deg to obtain dense disparity information. We thoroughly investigate the accuracy potential of such fisheye stereo systems using image data from our UAV. The empirical analysis is based on image pairs of a calibrated fisheye stereo camera system and two state-of-the-art algorithms for dense stereo applied to adequately rectified image pairs from fisheye stereo cameras. The canonical stochastic model for sensor points assumes homogeneous uncertainty and we generalize this model based on an empirical analysis using a test scene consisting of mutually orthogonal planes. We show (1) that the combination of adequately rectified fisheye image pairs and dense methods provides dense 3D point clouds at 6-7 Hz on our autonomous multi-copter UAV, (2) that the uncertainty of points depends on their angular distance from the optical axis, (3) how to estimate the variance component as a function of that distance, and (4) how the improved stochastic model improves the accuracy of the scene points.

    @Article{schneider16ral,
    title = {On the Accuracy of Dense Fisheye Stereo},
    author = {J. Schneider and C. Stachniss and W. F\"orstner},
    journal = ral,
    year = {2016},
    number = {1},
    pages = {227--234},
    volume = {1},
    abstract = {Fisheye cameras offer a large field of view, which is important for several robotics applications as a larger field of view allows for covering a large area with a single image. In contrast to classical cameras, however, fisheye cameras cannot be approximated well using the pinhole camera model and this renders the computation of depth information from fisheye stereo image pairs more complicated. In this work, we analyze the combination of an epipolar rectification model for fisheye stereo cameras with existing dense methods. This has the advantage that existing dense stereo systems can be applied as a black-box even with cameras that have field of view of more than 180 deg to obtain dense disparity information. We thoroughly investigate the accuracy potential of such fisheye stereo systems using image data from our UAV. The empirical analysis is based on image pairs of a calibrated fisheye stereo camera system and two state-of-the-art algorithms for dense stereo applied to adequately rectified image pairs from fisheye stereo cameras. The canonical stochastic model for sensor points assumes homogeneous uncertainty and we generalize this model based on an empirical analysis using a test scene consisting of mutually orthogonal planes. We show (1) that the combination of adequately rectified fisheye image pairs and dense methods provides dense 3D point clouds at 6-7 Hz on our autonomous multi-copter UAV, (2) that the uncertainty of points depends on their angular distance from the optical axis, (3) how to estimate the variance component as a function of that distance, and (4) how the improved stochastic model improves the accuracy of the scene points.},
    doi = {10.1109/LRA.2016.2516509},
    url = {https://www.ipb.uni-bonn.de/pdfs/schneider16ral.pdf},
    }

  • T. Schubert, S. Wenzel, R. Roscher, and C. Stachniss, “Investigation of Latent Traces Using Infrared Reflectance Hyperspectral Imaging,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2016, p. 97–102. doi:10.5194/isprs-annals-III-7-97-2016
    [BibTeX] [PDF]

    The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) from which we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. As our key result we successfully make latent traces visible up to highest available dilution (1:8000). We can attribute most of the detectability to interference of electromagnetic light with the water content of the traces in the Shortwave Infrared region of the spectrum. In a classification task we use several dimensionality reduction methods (PCA and LDA) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. The classifiers retrieve the exact positions of labeled trace preparation up to highest dilution and determine posterior probabilities. By modeling the classification with a Markov Random Field we obtain smoothed results.

    @InProceedings{schubert2016investigation,
    author = {Schubert, Till and Wenzel, Susanne and Roscher, Ribana and Stachniss, Cyrill},
    title = {{Investigation of Latent Traces Using Infrared Reflectance Hyperspectral Imaging}},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    volume = {III-7},
    pages = {97--102},
    year = {2016},
    doi = {10.5194/isprs-annals-III-7-97-2016},
    abstract = {The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) from which we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. As our key result we successfully make latent traces visible up to highest available dilution (1:8000). We can attribute most of the detectability to interference of electromagnetic light with the water content of the traces in the Shortwave Infrared region of the spectrum. In a classification task we use several dimensionality reduction methods (PCA and LDA) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. The classifiers retrieve the exact positions of labeled trace preparation up to highest dilution and determine posterior probabilities. By modeling the classification with a Markov Random Field we obtain smoothed results.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schubert2016Investigation.pdf},
    }

  • C. Siedentop, V. Laukhart, B. Krastev, D. Kasper, A. Wenden, G. Breuel, and C. Stachniss, “Autonomous Parking Using Previous Paths,” in Advanced Microsystems for Automotive Applications 2015: Smart Systems for Green and Automated Driving. Lecture Notes in Mobility, T. Schulze, B. Müller, and G. Meyer, Eds., Springer, 2016, pp. 3-14. doi:10.1007/978-3-319-20855-8_1
    [BibTeX]
    @InCollection{siedentop16lnb,
    title = {Autonomous Parking Using Previous Paths},
    author = {C. Siedentop and V. Laukhart and B. Krastev and D. Kasper and A. Wenden and G. Breuel and C. Stachniss},
    editor = {T. Schulze and B. M{\"u}ller and G. Meyer},
    booktitle = {Advanced Microsystems for Automotive Applications 2015: Smart Systems for Green and Automated Driving. Lecture Notes in Mobility},
    pages = {3--14},
    publisher = {Springer},
    year = {2016},
    doi = {10.1007/978-3-319-20855-8_1},
    }

  • C. Stachniss, “Springer Handbook of Photogrammetry.” Springer, 2016.
    [BibTeX]
    @InBook{springerbook-photo-slamchapter,
    author = {C. Stachniss},
    title = {Springer Handbook of Photogrammetry},
    chapter = {Simultaneous Localization and Mapping},
    publisher = {Springer},
    year = {2016},
    note = {In German},
    }

  • O. Vysotska and C. Stachniss, “Exploiting Building Information from Publicly Available Maps in Graph-Based SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2016.
    [BibTeX] [PDF] [Video]
    [none]
    @InProceedings{vysotska16iros,
    title = {Exploiting Building Information from Publicly Available Maps in Graph-Based {SLAM}},
    author = {O. Vysotska and C. Stachniss},
    booktitle = iros,
    year = {2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska16iros.pdf},
    videourl = {https://www.youtube.com/watch?v=5RfRAEP-baM},
    }

  • O. Vysotska and C. Stachniss, “Lazy Data Association For Image Sequences Matching Under Substantial Appearance Changes,” IEEE Robotics and Automation Letters (RA-L), vol. 1, iss. 1, pp. 213-220, 2016. doi:10.1109/LRA.2015.2512936
    [BibTeX] [PDF] [Code] [Video]

    Localization is an essential capability for mobile robots and the ability to localize in changing environments is key to robust outdoor navigation. Robots operating over extended periods of time should be able to handle substantial appearance changes such as those occurring over seasons or under different weather conditions. In this letter, we investigate the problem of efficiently coping with seasonal appearance changes in online localization. We propose a lazy data association approach for matching streams of incoming images to a reference image sequence in an online fashion. We present a search heuristic to quickly find matches between the current image sequence and a database using a data association graph. Our experiments conducted under substantial seasonal changes suggest that our approach can efficiently match image sequences while requiring a comparably small number of image to image comparisons

    @Article{vysotska16ral,
    title = {Lazy Data Association For Image Sequences Matching Under Substantial Appearance Changes},
    author = {O. Vysotska and C. Stachniss},
    journal = ral,
    year = {2016},
    volume = {1},
    number = {1},
    pages = {213--220},
    abstract = {Localization is an essential capability for mobile robots and the ability to localize in changing environments is key to robust outdoor navigation. Robots operating over extended periods of time should be able to handle substantial appearance changes such as those occurring over seasons or under different weather conditions. In this letter, we investigate the problem of efficiently coping with seasonal appearance changes in online localization. We propose a lazy data association approach for matching streams of incoming images to a reference image sequence in an online fashion. We present a search heuristic to quickly find matches between the current image sequence and a database using a data association graph. Our experiments conducted under substantial seasonal changes suggest that our approach can efficiently match image sequences while requiring a comparably small number of image to image comparisons},
    doi = {10.1109/LRA.2015.2512936},
    timestamp = {2016.04.18},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska16ral-icra.pdf},
    codeurl = {https://github.com/Photogrammetry-Robotics-Bonn/online_place_recognition},
    videourl = {https://www.youtube.com/watch?v=l-hNk7Z4lSk},
    }

  • S. Wenzel, “High-Level Facade Image Interpretation using Marked Point Processes,” PhD Thesis, 2016.
    [BibTeX] [PDF]

    In this thesis, we address facade image interpretation as one essential ingredient for the generation of high-detailed, semantic meaningful, three-dimensional city-models. Given a single rectified facade image, we detect relevant facade objects such as windows, entrances, and balconies, which yield a description of the image in terms of accurate position and size of these objects. Urban digital three-dimensional reconstruction and documentation is an active area of research with several potential applications, e.g., in the area of digital mapping for navigation, urban planning, emergency management, disaster control or the entertainment industry. A detailed building model which is not just a geometric object enriched with texture, allows for semantic requests as the number of floors or the location of balconies and entrances. Facade image interpretation is one essential step in order to yield such models. In this thesis, we propose the interpretation of facade images by combining evidence for the occurrence of individual object classes which we derive from data, and prior knowledge which guides the image interpretation in its entirety. We present a three-step procedure which generates features that are suited to describe relevant objects, learns a representation that is suited for object detection, and that enables the image interpretation using the results of object detection while incorporating prior knowledge about typical configurations of facade objects, which we learn from training data. According to these three sub-tasks, our major achievements are: We propose a novel method for facade image interpretation based on a marked point process. Therefor, we develop a model for the description of typical configurations of facade objects and propose an image interpretation system which combines evidence derived from data and prior knowledge about typical configurations of facade objects. 
In order to generate evidence from data, we propose a feature type which we call shapelets. They are scale invariant and provide large distinctiveness for facade objects. Segments of lines, arcs, and ellipses serve as basic features for the generation of shapelets. Therefor, we propose a novel line simplification approach which approximates given pixel-chains by a sequence of lines, circular, and elliptical arcs. Among others, it is based on an adaption to Douglas-Peucker’s algorithm, which is based on circles as basic geometric elements We evaluate each step separately. We show the effects of polyline segmentation and simplification on several images with comparable good or even better results, referring to a state-of-the-art algorithm, which proves their large distinctiveness for facade objects. Using shapelets we provide a reasonable classification performance on a challenging dataset, including intra-class variations, clutter, and scale changes. Finally, we show promising results for the facade interpretation system on several datasets and provide a qualitative evaluation which demonstrates the capability of complete and accurate detection of facade objects.

    @PhDThesis{wenzel2016high-level,
    title = {High-Level Facade Image Interpretation using Marked Point Processes},
    author = {Wenzel, Susanne},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2016},
    abstract = {In this thesis, we address facade image interpretation as one essential ingredient for the generation of high-detailed, semantic meaningful, three-dimensional city-models. Given a single rectified facade image, we detect relevant facade objects such as windows, entrances, and balconies, which yield a description of the image in terms of accurate position and size of these objects. Urban digital three-dimensional reconstruction and documentation is an active area of research with several potential applications, e.g., in the area of digital mapping for navigation, urban planning, emergency management, disaster control or the entertainment industry. A detailed building model which is not just a geometric object enriched with texture, allows for semantic requests as the number of floors or the location of balconies and entrances. Facade image interpretation is one essential step in order to yield such models. In this thesis, we propose the interpretation of facade images by combining evidence for the occurrence of individual object classes which we derive from data, and prior knowledge which guides the image interpretation in its entirety. We present a three-step procedure which generates features that are suited to describe relevant objects, learns a representation that is suited for object detection, and that enables the image interpretation using the results of object detection while incorporating prior knowledge about typical configurations of facade objects, which we learn from training data. According to these three sub-tasks, our major achievements are: We propose a novel method for facade image interpretation based on a marked point process. Therefor, we develop a model for the description of typical configurations of facade objects and propose an image interpretation system which combines evidence derived from data and prior knowledge about typical configurations of facade objects. In order to generate evidence from data, we propose a feature type which we call shapelets. They are scale invariant and provide large distinctiveness for facade objects. Segments of lines, arcs, and ellipses serve as basic features for the generation of shapelets. Therefor, we propose a novel line simplification approach which approximates given pixel-chains by a sequence of lines, circular, and elliptical arcs. Among others, it is based on an adaption to Douglas-Peucker's algorithm, which is based on circles as basic geometric elements We evaluate each step separately. We show the effects of polyline segmentation and simplification on several images with comparable good or even better results, referring to a state-of-the-art algorithm, which proves their large distinctiveness for facade objects. Using shapelets we provide a reasonable classification performance on a challenging dataset, including intra-class variations, clutter, and scale changes. Finally, we show promising results for the facade interpretation system on several datasets and provide a qualitative evaluation which demonstrates the capability of complete and accurate detection of facade objects.},
    address = {Bonn},
    url = {https://hss.ulb.uni-bonn.de/2016/4412/4412.htm},
    }

  • S. Wenzel and W. Förstner, “Facade Interpretation Using a Marked Point Process,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2016, p. 363–370. doi:10.5194/isprs-annals-III-3-363-2016
    [BibTeX] [PDF]

    Our objective is the interpretation of facade images in a top-down manner, using a Markov marked point process formulated as a Gibbs process. Given single rectified facade images we aim at the accurate detection of relevant facade objects as windows and entrances, using prior knowledge about their possible configurations within facade images. We represent facade objects by a simplified rectangular object model and present an energy model which evaluates the agreement of a proposed configuration with the given image and the statistics about typical configurations which we learned from training data. We show promising results on different datasets and provide a quantitative evaluation, which demonstrates the capability of complete and accurate detection of facade objects.

    @InProceedings{wenzel2016facade,
    title = {{Facade Interpretation Using a Marked Point Process}},
    author = {Wenzel, Susanne and F{\"o}rstner, Wolfgang},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2016},
    pages = {363--370},
    volume = {III-3},
    abstract = {Our objective is the interpretation of facade images in a top-down manner, using a Markov marked point process formulated as a Gibbs process. Given single rectified facade images we aim at the accurate detection of relevant facade objects as windows and entrances, using prior knowledge about their possible configurations within facade images. We represent facade objects by a simplified rectangular object model and present an energy model which evaluates the agreement of a proposed configuration with the given image and the statistics about typical configurations which we learned from training data. We show promising results on different datasets and provide a quantitative evaluation, which demonstrates the capability of complete and accurate detection of facade objects.},
    doi = {10.5194/isprs-annals-III-3-363-2016},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2016Facade.pdf},
    }

  • C. Merfels, T. Riemenschneider, and C. Stachniss, “Pose fusion with biased and dependent data for automated driving,” in Proc. of the Positioning and Navigation for Intelligent Transportation Systems Conf. (POSNAV ITS), 2016.
    [BibTeX] [PDF]
    @InProceedings{merfels2016posnav,
    title = {Pose fusion with biased and dependent data for automated driving},
    author = {C. Merfels and T. Riemenschneider and C. Stachniss},
    booktitle = {Proc. of the Positioning and Navigation for Intelligent Transportation Systems Conf. (POSNAV ITS)},
    year = {2016},
    }

2015

  • N. Abdo, C. Stachniss, L. Spinello, and W. Burgard, “Collaborative Filtering for Predicting User Preferences for Organizing Objects,” arXiv Preprint, vol. abs/1512.06362, 2015.
    [BibTeX] [PDF]
    [none]
    @Article{abdo15arxiv,
    title = {Collaborative Filtering for Predicting User Preferences for Organizing Objects},
    author = {N. Abdo and C. Stachniss and L. Spinello and W. Burgard},
    journal = arxiv,
    year = {2015},
    note = {arXiv:1512.06362 [cs.RO]},
    volume = {abs/1512.06362},
    timestamp = {2016.04.18},
    url = {https://arxiv.org/abs/1512.06362},
    }

  • N. Abdo, C. Stachniss, L. Spinello, and W. Burgard, “Robot, Organize my Shelves! Tidying up Objects by Predicting User Preferences,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 1557-1564. doi:10.1109/ICRA.2015.7139396
    [BibTeX] [PDF]

    As service robots become more and more capable of performing useful tasks for us, there is a growing need to teach robots how we expect them to carry out these tasks. However, learning our preferences is a nontrivial problem, as many of them stem from a variety of factors including personal taste, cultural background, or common sense. Obviously, such factors are hard to formulate or model a priori. In this paper, we present a solution for tidying up objects in containers, e.g., shelves or boxes, by following user preferences. We learn the user preferences using collaborative filtering based on crowdsourced and mined data. First, we predict pairwise object preferences of the user. Then, we subdivide the objects in containers by modeling a spectral clustering problem. Our solution is easy to update, does not require complex modeling, and improves with the amount of user data. We evaluate our approach using crowdsoucing data from over 1,200 users and demonstrate its effectiveness for two tidy-up scenarios. Additionally, we show that a real robot can reliably predict user preferences using our approach.

    @InProceedings{abdo15icra,
    title = {Robot, Organize my Shelves! Tidying up Objects by Predicting User Preferences},
    author = {N. Abdo and C. Stachniss and L. Spinello and W. Burgard},
    booktitle = icra,
    year = {2015},
    pages = {1557--1564},
    abstract = {As service robots become more and more capable of performing useful tasks for us, there is a growing need to teach robots how we expect them to carry out these tasks. However, learning our preferences is a nontrivial problem, as many of them stem from a variety of factors including personal taste, cultural background, or common sense. Obviously, such factors are hard to formulate or model a priori. In this paper, we present a solution for tidying up objects in containers, e.g., shelves or boxes, by following user preferences. We learn the user preferences using collaborative filtering based on crowdsourced and mined data. First, we predict pairwise object preferences of the user. Then, we subdivide the objects in containers by modeling a spectral clustering problem. Our solution is easy to update, does not require complex modeling, and improves with the amount of user data. We evaluate our approach using crowdsoucing data from over 1,200 users and demonstrate its effectiveness for two tidy-up scenarios. Additionally, we show that a real robot can reliably predict user preferences using our approach.},
    doi = {10.1109/ICRA.2015.7139396},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/abdo15icra.pdf},
    }

  • I. Bogoslavskyi, L. Spinello, W. Burgard, and C. Stachniss, “Where to Park? Minimizing the Expected Time to Find a Parking Space,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 2147-2152. doi:10.1109/ICRA.2015.7139482
    [BibTeX] [PDF]

    Quickly finding a free parking spot that is close to a desired target location can be a difficult task. This holds for human drivers and autonomous cars alike. In this paper, we investigate the problem of predicting the occupancy of parking spaces and exploiting this information during route planning. We propose an MDP-based planner that considers route information as well as the occupancy probabilities of parking spaces to compute the path that minimizes the expected total time for finding an unoccupied parking space and for walking from the parking location to the target destination. We evaluated our system on real world data gathered over several days in a real parking lot. We furthermore compare our approach to three parking strategies and show that our method outperforms the alternative behaviors.

    @InProceedings{bogoslavskyi15icra,
    title = {Where to Park? Minimizing the Expected Time to Find a Parking Space},
    author = {I. Bogoslavskyi and L. Spinello and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2015},
    pages = {2147--2152},
    abstract = {Quickly finding a free parking spot that is close to a desired target location can be a difficult task. This holds for human drivers and autonomous cars alike. In this paper, we investigate the problem of predicting the occupancy of parking spaces and exploiting this information during route planning. We propose an MDP-based planner that considers route information as well as the occupancy probabilities of parking spaces to compute the path that minimizes the expected total time for finding an unoccupied parking space and for walking from the parking location to the target destination. We evaluated our system on real world data gathered over several days in a real parking lot. We furthermore compare our approach to three parking strategies and show that our method outperforms the alternative behaviors.},
    doi = {10.1109/ICRA.2015.7139482},
    timestamp = {2015.06.29},
    url = {https://www.ipb.uni-bonn.de/pdfs/bogoslavskyi15icra.pdf},
    }

  • F. M. Carlucci, L. Nardi, L. Iocchi, and D. Nardi, “Explicit Representation of Social Norms for Social Robots,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2015, pp. 4191-4196. doi:10.1109/IROS.2015.7353970
    [BibTeX] [PDF]

    As robots are expected to become more and more available in everyday environments, interaction with humans is assuming a central role. Robots working in populated environments are thus expected to demonstrate socially acceptable behaviors and to follow social norms. However, most of the recent works in this field do not address the problem of explicit representation of the social norms and their integration in the reasoning and the execution components of a cognitive robot. In this paper, we address the design of robotic systems that support some social behavior by implementing social norms. We present a framework for planning and execution of social plans, in which social norms are described in a domain and language independent form. A full implementation of the proposed framework is described and tested in a realistic scenario with non-expert and non-recruited users.

    @InProceedings{carlucci15iros,
    title = {Explicit Representation of Social Norms for Social Robots},
    author = {F. M. Carlucci and L. Nardi and L. Iocchi and D. Nardi},
    booktitle = iros,
    year = {2015},
    pages = {4191--4196},
    abstract = {As robots are expected to become more and more available in everyday environments, interaction with humans is assuming a central role. Robots working in populated environments are thus expected to demonstrate socially acceptable behaviors and to follow social norms. However, most of the recent works in this field do not address the problem of explicit representation of the social norms and their integration in the reasoning and the execution components of a cognitive robot. In this paper, we address the design of robotic systems that support some social behavior by implementing social norms. We present a framework for planning and execution of social plans, in which social norms are described in a domain and language independent form. A full implementation of the proposed framework is described and tested in a realistic scenario with non-expert and non-recruited users.},
    doi = {10.1109/IROS.2015.7353970},
    timestamp = {2016.04.19},
    url = {https://www.ipb.uni-bonn.de/pdfs/Carlucci2015Explicit.pdf},
    }

  • T. Naseer, M. Ruhnke, L. Spinello, C. Stachniss, and W. Burgard, “Robust Visual SLAM Across Seasons,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2015, pp. 2529-2535. doi:10.1109/IROS.2015.7353721
    [BibTeX] [PDF]

    In this paper, we present an appearance-based visual SLAM approach that focuses on detecting loop closures across seasons. Given two image sequences, our method first extracts one descriptor per image for both sequences using a deep convolutional neural network. Then, we compute a similarity matrix by comparing each image of a query sequence with a database. Finally, based on the similarity matrix, we formulate a flow network problem and compute matching hypotheses between sequences. In this way, our approach can handle partially matching routes, loops in the trajectory and different speeds of the robot. With a matching hypothesis as loop closure information and the odometry information of the robot, we formulate a graph based SLAM problem and compute a joint maximum likelihood trajectory.

    @InProceedings{naseer15iros,
    title = {Robust Visual {SLAM} Across Seasons},
    author = {Naseer, Tayyab and Ruhnke, Michael and Spinello, Luciano and Stachniss, Cyrill and Burgard, Wolfram},
    booktitle = iros,
    year = {2015},
    pages = {2529--2535},
    abstract = {In this paper, we present an appearance-based visual SLAM approach that focuses on detecting loop closures across seasons. Given two image sequences, our method first extracts one descriptor per image for both sequences using a deep convolutional neural network. Then, we compute a similarity matrix by comparing each image of a query sequence with a database. Finally, based on the similarity matrix, we formulate a flow network problem and compute matching hypotheses between sequences. In this way, our approach can handle partially matching routes, loops in the trajectory and different speeds of the robot. With a matching hypothesis as loop closure information and the odometry information of the robot, we formulate a graph based SLAM problem and compute a joint maximum likelihood trajectory.},
    doi = {10.1109/IROS.2015.7353721},
    timestamp = {2016.04.19},
    url = {https://www.ipb.uni-bonn.de/pdfs/Naseer2015Robust.pdf},
    }

  • D. Perea-Ström, F. Nenci, and C. Stachniss, “Predictive Exploration Considering Previously Mapped Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 2761-2766. doi:10.1109/ICRA.2015.7139574
    [BibTeX] [PDF]

    The ability to explore an unknown environment is an important prerequisite for building truly autonomous robots. The central decision that a robot needs to make when exploring an unknown environment is to select the next view point(s) for gathering observations. In this paper, we consider the problem of how to select view points that support the underlying mapping process. We propose a novel approach that makes predictions about the structure of the environments in the unexplored areas by relying on maps acquired previously. Our approach seeks to find similarities between the current surroundings of the robot and previously acquired maps stored in a database in order to predict how the environment may expand in the unknown areas. This allows us to predict potential future loop closures early. This knowledge is used in the view point selection to actively close loops and in this way reduce the uncertainty in the robot’s belief. We implemented and tested the proposed approach. The experiments indicate that our method improves the ability of a robot to explore challenging environments and improves the quality of the resulting maps.

    @InProceedings{perea15icra,
    title = {Predictive Exploration Considering Previously Mapped Environments},
    author = {D. Perea-Str{\"o}m and F. Nenci and C. Stachniss},
    booktitle = icra,
    year = {2015},
    pages = {2761--2766},
    abstract = {The ability to explore an unknown environment is an important prerequisite for building truly autonomous robots. The central decision that a robot needs to make when exploring an unknown environment is to select the next view point(s) for gathering observations. In this paper, we consider the problem of how to select view points that support the underlying mapping process. We propose a novel approach that makes predictions about the structure of the environments in the unexplored areas by relying on maps acquired previously. Our approach seeks to find similarities between the current surroundings of the robot and previously acquired maps stored in a database in order to predict how the environment may expand in the unknown areas. This allows us to predict potential future loop closures early. This knowledge is used in the view point selection to actively close loops and in this way reduce the uncertainty in the robot's belief. We implemented and tested the proposed approach. The experiments indicate that our method improves the ability of a robot to explore challenging environments and improves the quality of the resulting maps.},
    doi = {10.1109/ICRA.2015.7139574},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/perea15icra.pdf},
    }

  • R. Roscher, C. Römer, B. Waske, and L. Plümer, “Landcover classification with self-taught learning on archetypal dictionaries,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2015, pp. 2358-2361. doi:10.1109/IGARSS.2015.7326282
    [BibTeX]
    @InProceedings{roscher2015selftaught,
    title = {Landcover classification with self-taught learning on archetypal dictionaries},
    author = {Roscher, R. and R\"omer, C. and Waske, B. and Pl\"umer, L.},
    booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2015},
    month = jul,
    pages = {2358--2361},
    doi = {10.1109/IGARSS.2015.7326282},
    }

  • R. Roscher, B. Uebbing, and J. Kusche, “Spatio-temporal altimeter waveform retracking via sparse representation and conditional random fields,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2015, pp. 1234-1237. doi:10.1109/IGARSS.2015.7325996
    [BibTeX]
    @InProceedings{roscher2015altimeter,
    title = {Spatio-temporal altimeter waveform retracking via sparse representation and conditional random fields},
    author = {Roscher, R. and Uebbing, B. and Kusche, J.},
    booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2015},
    month = jul,
    pages = {1234--1237},
    doi = {10.1109/IGARSS.2015.7325996},
    }

  • R. Roscher and B. Waske, “Shapelet-Based Sparse Representation for Landcover Classification of Hyperspectral Images,” IEEE Transactions on Geoscience and Remote Sensing, vol. 54, iss. 3, p. 1623–1634, 2015. doi:10.1109/TGRS.2015.2484619
    [BibTeX]

    This paper presents a sparse-representation-based classification approach with a novel dictionary construction procedure. By using the constructed dictionary, sophisticated prior knowledge about the spatial nature of the image can be integrated. The approach is based on the assumption that each image patch can be factorized into characteristic spatial patterns, also called shapelets, and patch-specific spectral information. A set of shapelets is learned in an unsupervised way, and spectral information is embodied by training samples. A combination of shapelets and spectral information is represented in an undercomplete spatial-spectral dictionary for each individual patch, where the elements of the dictionary are linearly combined to a sparse representation of the patch. The patch-based classification is obtained by means of the representation error. Experiments are conducted on three well-known hyperspectral image data sets. They illustrate that our proposed approach shows superior results in comparison to sparse-representation-based classifiers that use only limited spatial information and behaves competitively with or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse-representation-based classifiers.

    @Article{roscher2015shapelet,
    author = {Roscher, R. and Waske, B.},
    title = {Shapelet-Based Sparse Representation for Landcover Classification of Hyperspectral Images},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2015},
    volume = {54},
    number = {3},
    pages = {1623--1634},
    issn = {0196-2892},
    doi = {10.1109/TGRS.2015.2484619},
    abstract = {This paper presents a sparse-representation-based classification approach with a novel dictionary construction procedure. By using the constructed dictionary, sophisticated prior knowledge about the spatial nature of the image can be integrated. The approach is based on the assumption that each image patch can be factorized into characteristic spatial patterns, also called shapelets, and patch-specific spectral information. A set of shapelets is learned in an unsupervised way, and spectral information is embodied by training samples. A combination of shapelets and spectral information is represented in an undercomplete spatial-spectral dictionary for each individual patch, where the elements of the dictionary are linearly combined to a sparse representation of the patch. The patch-based classification is obtained by means of the representation error. Experiments are conducted on three well-known hyperspectral image data sets. They illustrate that our proposed approach shows superior results in comparison to sparse-representation-based classifiers that use only limited spatial information and behaves competitively with or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse-representation-based classifiers.},
    }

  • T. Schubert, “Investigation of Latent Traces Using Hyperspectral Imaging,” bachelor thesis, Institute of Photogrammetry, University of Bonn, 2015.
    [BibTeX]

    The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) which is the process of recording many narrowband intensity images across a wide range of the light spectrum. From this technique we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. The hyperspectral images are acquired by scanning with two line sensors of visible and infrared light over the specimen. After an image normalization step we obtain reflectance values arranged as an image plane for each wavelength. The atomic process is initiated by excitation with illumination light such that absorption and elastic scattering cause emission of trace-specific light. In a spectroscopic investigation we can attribute most of the trace-specific signal to chemical interaction of infrared light with the water content of the traces. Image planes (i.e. band images) at infrared wavelengths allow detectability to a much higher level than light of the visible region. Ratio images provide definition of new features which can be established as spectral indices. By these arithmetic operations with image planes we can account for variations in the tissue and make traces even more highlighted towards the fabric. The spectral regions which we obtain at a maximal measure of discriminative power indicate regions known as absorption peaks for biological components such as hemoglobin and water. In this thesis we make latent traces, i.e. non-visible for the human eye, visible up to highest available dilution (1:8000) in infrared data. Hyperspectral images in the region of visible light achieve to detect traces only marginally beyond visibility by human eye. In order to evaluate the detectability of traces we exploit several classifiers to labeled data. 
We use several dimensionality reduction methods (PCA, LDA, band image and ratio image) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. In the classification task we retrieve the exact positions of labeled trace preparation up to highest dilution. PCA prior to LDA and ML decision function achieves best results for classifying trace against background. Random Forest is preferable in multiclass classification. On the contrary, neither spectral indices nor classification approaches yield adequate achievements for the application of methods learned on labeled data to other images of specimen with arbitrary fabrics. Customized preprocessing and dimensionality reduction methods achieve no significant reduction of background influence. The proportion of trace-specific signal in the data is not sufficient for this task. We suggest supervision of the illumination light to pointedly initiate trace-specific interference. Concerning field usage of HSI we prefer area-scanning cameras (i.e. image plane acquisition with spectral scanning by a wavelength-tunable bandpass filter). Band and ratio images at established spectral indices qualify for live view screening on an external screen.

    @MastersThesis{schubert2015,
    title = {Investigation of Latent Traces Using Hyperspectral Imaging},
    author = {Till Schubert},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2015},
    type = {Bachelor thesis},
    abstract = {The detection of traces is a main task of forensic science. A potential method is hyperspectral imaging (HSI) which is the process of recording many narrowband intensity images across a wide range of the light spectrum. From this technique we expect to capture more fluorescence effects than with common Forensic Light Sources (FLS). Specimen of blood, semen and saliva traces in several dilution steps are prepared on cardboard substrate. The hyperspectral images are acquired by scanning with two line sensors of visible and infrared light over the specimen. After an image normalization step we obtain reflectance values arranged as an image plane for each wavelength. The atomic process is initiated by excitation with illumination light such that absorption and elastic scattering cause emission of trace-specific light. In a spectroscopic investigation we can attribute most of the trace-specific signal to chemical interaction of infrared light with the water content of the traces. Image planes (i.e. band images) at infrared wavelengths allow detectability to a much higher level than light of the visible region. Ratio images provide definition of new features which can be established as spectral indices. By these arithmetic operations with image planes we can account for variations in the tissue and make traces even more highlighted towards the fabric. The spectral regions which we obtain at a maximal measure of discriminative power indicate regions known as absorption peaks for biological components such as hemoglobin and water. In this thesis we make latent traces, i.e. non-visible for the human eye, visible up to highest available dilution (1:8000) in infrared data. Hyperspectral images in the region of visible light achieve to detect traces only marginally beyond visibility by human eye. In order to evaluate the detectability of traces we exploit several classifiers to labeled data. 
We use several dimensionality reduction methods (PCA, LDA, band image and ratio image) in combination with a Maximum Likelihood (ML) classifier assuming normally distributed data. Random Forest builds a competitive approach. In the classification task we retrieve the exact positions of labeled trace preparation up to highest dilution. PCA prior to LDA and ML decision function achieves best results for classifying trace against background. Random Forest is preferable in multiclass classification. On the contrary, neither spectral indices nor classification approaches yield adequate achievements for the application of methods learned on labeled data to other images of specimen with arbitrary fabrics. Customized preprocessing and dimensionality reduction methods achieve no significant reduction of background influence. The proportion of trace-specific signal in the data is not sufficient for this task. We suggest supervision of the illumination light to pointedly initiate trace-specific interference. Concerning field usage of HSI we prefer area-scanning
    cameras (i.e. image plane acquisition with spectral scanning by a wavelength-tunable bandpass filter). Band and ratio images at established spectral indices qualify for live view screening on an external screen.},
    timestamp = {2015.09.28},
    }

  • C. Siedentop, R. Heinze, D. Kasper, G. Breuel, and C. Stachniss, “Path-Planning for Autonomous Parking with Dubins Curves,” in Proc. of the Workshop Fahrerassistenzsysteme, 2015.
    [BibTeX] [PDF]
    @InProceedings{siedentop15fas,
    author = {C. Siedentop and R. Heinze and D. Kasper and G. Breuel and C. Stachniss},
    title = {Path-Planning for Autonomous Parking with Dubins Curves},
    booktitle = {Proc. of the Workshop Fahrerassistenzsysteme},
    year = {2015},
    }

  • O. Vysotska, T. Naseer, L. Spinello, W. Burgard, and C. Stachniss, “Efficient and Effective Matching of Image Sequences Under Substantial Appearance Changes Exploiting GPS Prior,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015, pp. 2774-2779. doi:10.1109/ICRA.2015.7139576
    [BibTeX] [PDF] [Code]
    @InProceedings{vysotska15icra,
    title = {Efficient and Effective Matching of Image Sequences Under Substantial Appearance Changes Exploiting GPS Prior},
    author = {O. Vysotska and T. Naseer and L. Spinello and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2015},
    pages = {2774--2779},
    doi = {10.1109/ICRA.2015.7139576},
    timestamp = {2015.06.29},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska15icra.pdf},
    codeurl = {https://github.com/ovysotska/image_sequence_matcher},
    }

  • O. Vysotska and C. Stachniss, “Lazy Sequences Matching Under Substantial Appearance Changes,” in Workshop on Visual Place Recognition in Changing Environments at the IEEE Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), 2015.
    [BibTeX] [PDF]
    [none]
    @InProceedings{vysotska15icraws,
    title = {Lazy Sequences Matching Under Substantial Appearance Changes},
    author = {O. Vysotska and C. Stachniss},
    booktitle = {Workshop on Visual Place Recognition in Changing Environments at the IEEE } # icra,
    year = {2015},
    timestamp = {2015.06.29},
    url = {https://www.ipb.uni-bonn.de/pdfs/vysotska15icra-ws.pdf},
    }

2014

  • B. Frank, C. Stachniss, R. Schmedding, M. Teschner, and W. Burgard, “Learning object deformation models for robot motion planning,” Robotics and Autonomous Systems, 2014. doi:10.1016/j.robot.2014.04.005
    [BibTeX] [PDF]
    [none]
    @Article{frank2014,
    title = {Learning object deformation models for robot motion planning},
    author = {Barbara Frank and Cyrill Stachniss and R{\"u}diger Schmedding and Matthias Teschner and Wolfram Burgard},
    journal = {Robotics and Autonomous Systems},
    year = {2014},
    crossref = {mn},
    doi = {10.1016/j.robot.2014.04.005},
    issn = {0921-8890},
    keywords = {Mobile robots},
    url = {https://www.sciencedirect.com/science/article/pii/S0921889014000797},
    }

  • N. Abdo, L. Spinello, W. Burgard, and C. Stachniss, “Inferring What to Imitate in Manipulation Actions by Using a Recommender System,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    @InProceedings{abdo2014icra,
    author = {N. Abdo and L. Spinello and W. Burgard and C. Stachniss},
    title = {Inferring What to Imitate in Manipulation Actions by Using a Recommender System},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    }

  • P. Agarwal, W. Burgard, and C. Stachniss, “Helmert’s and Bowie’s Geodetic Mapping Methods and Their Relation to Graph-Based SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX]
    @InProceedings{agarwal2014icra,
    author = {P. Agarwal and W. Burgard and C. Stachniss},
    title = {Helmert's and Bowie's Geodetic Mapping Methods and Their Relation to Graph-Based SLAM},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    timestamp = {2014.04.24},
    }

  • P. Agarwal, W. Burgard, and C. Stachniss, “A Survey of Geodetic Approaches to Mapping and the Relationship to Graph-Based SLAM,” IEEE Robotics and Automation Magazine, vol. 21, pp. 63-80, 2014. doi:10.1109/MRA.2014.2322282
    [BibTeX] [PDF]

    The ability to simultaneously localize a robot and build a map of the environment is central to most robotics applications, and the problem is often referred to as simultaneous localization and mapping (SLAM). Robotics researchers have proposed a large variety of solutions allowing robots to build maps and use them for navigation. In addition, the geodetic community has addressed large-scale map building for centuries, computing maps that span across continents. These large-scale mapping processes had to deal with several challenges that are similar to those of the robotics community. In this article, we explain key geodetic map building methods that we believe are relevant for robot mapping. We also aim at providing a geodetic perspective on current state-of-the-art SLAM methods and identifying similarities both in terms of challenges faced and the solutions proposed by both communities. The central goal of this article is to connect both fields and enable future synergies between them.

    @Article{agarwal2014ram,
    title = {A Survey of Geodetic Approaches to Mapping and the Relationship to Graph-Based SLAM},
    author = {Pratik Agarwal and Wolfram Burgard and Cyrill Stachniss},
    journal = {IEEE Robotics and Automation Magazine},
    year = {2014},
    pages = {63--80},
    volume = {21},
    abstract = {The ability to simultaneously localize a robot and build a map of the environment is central to most robotics applications, and the problem is often referred to as simultaneous localization and mapping (SLAM). Robotics researchers have proposed a large variety of solutions allowing robots to build maps and use them for navigation. In addition, the geodetic community has addressed large-scale map building for centuries, computing maps that span across continents. These large-scale mapping processes had to deal with several challenges that are similar to those of the robotics community. In this article, we explain key geodetic map building methods that we believe are relevant for robot mapping. We also aim at providing a geodetic perspective on current state-of-the-art SLAM methods and identifying similarities both in terms of challenges faced and the solutions proposed by both communities. The central goal of this article is to connect both fields and enable future synergies between them.},
    doi = {10.1109/MRA.2014.2322282},
    timestamp = {2014.09.18},
    }

  • P. Agarwal, G. Grisetti, G. D. Tipaldi, L. Spinello, W. Burgard, and C. Stachniss, “Experimental Analysis of Dynamic Covariance Scaling for Robust Map Optimization Under Bad Initial Estimates,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX]
    [none]
    @InProceedings{agarwal2014-dcs,
    title = {Experimental Analysis of Dynamic Covariance Scaling for Robust Map Optimization Under Bad Initial Estimates},
    author = {P. Agarwal and G. Grisetti and G.D. Tipaldi and L. Spinello and W. Burgard and C. Stachniss},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    timestamp = {2014.04.24},
    }

  • M. Flick, “Localisation Using Open Street Map Data,” bachelor thesis, Institute of Photogrammetry, University of Bonn, 2014.
    [BibTeX]

    The goal of this project is to build an online localisation system that localises a vehicle by using OpenStreetMap data and a record of the driven path. Since the Global Positioning Service (GPS) can only be used reliably when the satellite signal can be clearly received, which is when positioned outside of buildings and clear interfering signals, it is only accessible for a certain group of users. Furthermore, it can only be used to the conditions of the US government, as it is them who maintain the GPS-system. Our project develops an alternative for localisation by using independent data, such as OpenStreetMap data and measurements of the driven vehicle, the odometry. This approach uses a particle filter to localise a vehicle. It is a sampling approach that samples complex posterior densities over state spaces. Samples, called particles, are resampled according to the likelihood of the vehicle being at that position. To calculate this information, the position of a particle is weighted. A chamfer matching function performs this task by comparing the driven odometry to OpenStreetMap data and finding the best matches. Chamfer matching evaluates the match of edges to query image. The more similar the current odometry to the query image the better the match. According to the euclidean distance of the particles to their nearest match, the importance of the particle is measured. The particle filter loops over time and with each measurement update the particles move according to their motion update and conglomerate on the most likely position. Assuming that this approach can work in real time and with high accuracy, it is usable on its own with free accessible and contemporary geodata. For this purpose the vehicle tracks its driven path, for example by wheel odometry, and both, the track and the OpenStreetMap data, are evaluated during the runtime of the program to get the current position. 
We show that the particle filter compensates uncertainties of measurement on the basis of loose measuring by performing a robust sampling update. A novel feature of this approach is that we show that the type of odometry does not matter as the chamfer matching and due to the robustness of the particle filter this can overcome. We demonstrate the located position of the vehicle in comparing it to the GPS position of the vehicle to show the difference and accuracy. Also, we will compare the runtime efficiency of GPS to that of the combination of particle filter and chamfer matching approach.

    @MastersThesis{flick2014localisation,
    title = {Localisation Using Open Street Map Data},
    author = {Mareike Flick},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2014},
    type = {Bachelor thesis},
    abstract = {The goal of this project is to build an online localisation system that localises a vehicle by using OpenStreetMap data and a record of the driven path. Since the Global Positioning Service (GPS) can only be used reliably when the satellite signal can be clearly received, which is when positioned outside of buildings and clear interfering signals, it is only accessible for a certain group of users. Furthermore, it can only be used to the conditions of the US government, as it is them who maintain the GPS-system. Our project develops an alternative for localisation by using independent data, such as OpenStreetMap data and measurements of the driven vehicle, the odometry. This approach uses a particle filter to localise a vehicle. It is a sampling approach that samples complex posterior densities over state spaces. Samples, called particles, are resampled according to the likelihood of the vehicle being at that position. To calculate this information, the position of a particle is weighted. A chamfer matching function performs this task by comparing the driven odometry to OpenStreetMap data and finding the best matches. Chamfer matching evaluates the match of edges to query image. The more similar the current odometry to the query image the better the match. According to the euclidean distance of the particles to their nearest match, the importance of the particle is measured. The particle filter loops over time and with each measurement update the particles move according to their motion update and conglomerate on the most likely position. Assuming that this approach can work in real time and with high accuracy, it is usable on its own with free accessible and contemporary geodata. For this purpose the vehicle tracks its driven path, for example by wheel odometry, and both, the track and the OpenStreetMap data, are evaluated during the runtime of the program to get the current position. 
We show that the particle filter compensates uncertainties of measurement on the basis of loose measuring by performing a robust sampling update. A novel feature of this approach is that we show that the type of odometry does not matter as the chamfer matching and due to the robustness of the particle filter this can overcome. We demonstrate the located position of the vehicle in comparing it to the GPS position of the vehicle to show the difference and accuracy. Also, we will compare the runtime efficiency of GPS to that of the combination of particle filter and chamfer matching approach.},
    timestamp = {2015.01.19},
    }

  • K. Franz, “Bestimmung der Trajektorie des ATV-4 bei der Separation von der Ariane-5 Oberstufe aus einer Stereo-Bildsequenz,” bachelor thesis, Institute of Photogrammetry, University of Bonn, 2014.
    [BibTeX]

    The successfull launch of the spacecraft, ATV-4, on June 5, 2013 by the German Aerospace Center (abbr. DLR) and the European Space Agency (abbr. ESA) is a relevant issue in photogrammetric regard. For the first time, the seperation process and the first seconds of space flight of an automated transfer vehicle could be recorded, tracked and supervised by assembling a stereo camerasystem at the Ariane rocket. This monitoring task includes the reconstruction of the ATV’s trajectory in the stereo image sequence. As main goal of this bachelor thesis we developed a routine that derives this trajectory. Our approach is based on object tracking by implementing a KLT-Tracker. First, interesting points have to be detected in a region of interest and be tracked over time. The homologous points in the stereo image partner can be extracted by template matching with subpixel precision. Subsequently, the object coordinates can be calculated by spatial intersection. With the resulting 3D point clouds the motion can be computed. However, numerous analyses have shown, that the reconstruction of the ATV’s trajectory is very insufficient due to the mechanical constellations and also to a missing photogrammetric calibration. To overcome this and to get more suitable data, it was necessary to create a test scenario. This data allow a more realistic validation of our approach. The records of the test scenario are realized with a comparable stereo camerasystem to the DLR configurations. However, a photogrammetric calibration is performed for this system. In addition, long distances to the camerasystems are avoided, since such long distances cause problems in the DLR sequence. Here, the stereoscopical evaluation can fail soon. Nevertheless, the trajectory of the ATV is reconstructable. For this purpose, the stereo image sequences have to be shortened. Only about two thirds of the sequence can be used as input for this method. 
Because of the missing stochastical information the resulting uncertainties can not be adjusted. Especially the implementation on the test data revealed that our approach generates reasonable trajectories.

    @MastersThesis{franz2014,
    title = {Bestimmung der Trajektorie des ATV-4 bei der Separation von der Ariane-5 Oberstufe aus einer Stereo-Bildsequenz},
    author = {Katharina Franz},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2014},
    type = {Bachelor thesis},
    abstract = {The successfull launch of the spacecraft, ATV-4, on June 5, 2013 by the German Aerospace Center (abbr. DLR) and the European Space Agency (abbr. ESA) is a relevant issue in photogrammetric regard. For the first time, the seperation process and the first seconds of space flight of an automated transfer vehicle could be recorded, tracked and supervised by assembling a stereo camerasystem at the Ariane rocket. This monitoring task includes the reconstruction of the ATV's trajectory in the stereo image sequence. As main goal of this bachelor thesis we developed a routine that derives this trajectory. Our approach is based on object tracking by implementing a KLT-Tracker. First, interesting points have to be detected in a region of interest and be tracked over time. The homologous points in the stereo image partner can be extracted by template matching with subpixel precision. Subsequently, the object coordinates can be calculated by spatial intersection. With the resulting 3D point clouds the motion can be computed. However, numerous analyses have shown, that the reconstruction of the ATV's trajectory is very insufficient due to the mechanical constellations and also to a missing photogrammetric calibration. To overcome this and to get more suitable data, it was necessary to create a test scenario. This data allow a more realistic validation of our approach. The records of the test scenario are realized with a comparable stereo camerasystem to the DLR configurations. However, a photogrammetric calibration is performed for this system. In addition, long distances to the camerasystems are avoided, since such long distances cause problems in the DLR sequence. Here, the stereoscopical evaluation can fail soon. Nevertheless, the trajectory of the ATV is reconstructable. For this purpose, the stereo image sequences have to be shortened. Only about two thirds of the sequence can be used as input for this method. 
Because of the missing stochastical information the resulting uncertainties can not be adjusted. Especially the implementation on the test data revealed that our approach generates reasonable trajectories.},
    timestamp = {2014.09.30},
    }

  • R. Hagensieker, R. Roscher, and B. Waske, “Texture-based classification of a tropical forest area using multi-temporal ASAR data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2014.
    [BibTeX]
    [none]
    @InProceedings{hagensieker2014texture,
    title = {Texture-based classification of a tropical forest area using multi-temporal ASAR data},
    author = {Hagensieker, Ron and Roscher, Ribana and Waske, Bj{\"o}rn},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2014},
    owner = {ribana},
    timestamp = {2014.11.04},
    }

  • K. Herzog, R. Roscher, M. Wieland, A. Kicherer, T. Läbe, W. Förstner, H. Kuhlmann, and R. Töpfer, “Initial steps for high-throughput phenotyping in vineyards,” VITIS – Journal of Grapevine Research, vol. 53, iss. 1, p. 1–8, 2014.
    [BibTeX]

    The evaluation of phenotypic characters of grape- vines is required directly in the vineyard and is strongly limited by time, costs and the subjectivity of person in charge. Sensor-based techniques are prerequisite to al- low non-invasive phenotyping of individual plant traits, to increase the quantity of object records and to reduce error variation. Thus, a Prototype-Image-Acquisition- System (PIAS) was developed for semi-automated cap- ture of geo-referenced RGB images in an experimental vineyard. Different strategies were tested for image in- terpretation using Matlab. The interpretation of imag- es from the vineyard with the real background is more practice-oriented but requires the calculation of depth maps. Images were utilised to verify the phenotyping results of two semi-automated and one automated pro- totype image interpretation framework. The semi-auto- mated procedures enable contactless and non-invasive detection of bud burst and quantification of shoots at an early developmental stage (BBCH 10) and enable fast and accurate determination of the grapevine berry size at BBCH 89. Depending on the time of image ac- quisition at BBCH 10 up to 94 \% of green shoots were visible in images. The mean berry size (BBCH 89) was recorded non-invasively with a precision of 1 mm.

    @Article{herzog2014initial,
    title = {Initial steps for high-throughput phenotyping in vineyards},
    author = {Herzog, Katja and Roscher, Ribana and Wieland, Markus and Kicherer, Anna and L\"abe, Thomas and F\"orstner, Wolfgang and Kuhlmann, Heiner and T\"opfer, Reinhard},
    journal = {VITIS - Journal of Grapevine Research},
    year = {2014},
    month = jan,
    number = {1},
    pages = {1--8},
    volume = {53},
    abstract = {The evaluation of phenotypic characters of grape- vines is required directly in the vineyard and is strongly limited by time, costs and the subjectivity of person in charge. Sensor-based techniques are prerequisite to al- low non-invasive phenotyping of individual plant traits, to increase the quantity of object records and to reduce error variation. Thus, a Prototype-Image-Acquisition- System (PIAS) was developed for semi-automated cap- ture of geo-referenced RGB images in an experimental vineyard. Different strategies were tested for image in- terpretation using Matlab. The interpretation of imag- es from the vineyard with the real background is more practice-oriented but requires the calculation of depth maps. Images were utilised to verify the phenotyping results of two semi-automated and one automated pro- totype image interpretation framework. The semi-auto- mated procedures enable contactless and non-invasive detection of bud burst and quantification of shoots at an early developmental stage (BBCH 10) and enable fast and accurate determination of the grapevine berry size at BBCH 89. Depending on the time of image ac- quisition at BBCH 10 up to 94 \% of green shoots were visible in images. The mean berry size (BBCH 89) was recorded non-invasively with a precision of 1 mm.},
    }

  • S. Ito, F. Endres, M. Kuderer, G. D. Tipaldi, C. Stachniss, and W. Burgard, “W-RGB-D: Floor-Plan-Based Indoor Global Localization Using a Depth Camera and WiFi,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    [none]
    @InProceedings{ito2014,
    title = {W-RGB-D: Floor-Plan-Based Indoor Global Localization Using a Depth Camera and WiFi},
    author = {S. Ito and F. Endres and M. Kuderer and G.D. Tipaldi and C. Stachniss and W. Burgard},
    booktitle = icra,
    year = {2014},
    address = {Hong Kong, China},
    timestamp = {2014.04.24},
    url = {https://www2.informatik.uni-freiburg.de/~tipaldi/papers/ito14icra.pdf},
    }

  • R. Kümmerle, M. Ruhnke, B. Steder, C. Stachniss, and W. Burgard, “Autonomous Robot Navigation in Highly Populated Pedestrian Zones,” Journal of Field Robotics (JFR), 2014. doi:10.1002/rob.21534
    [BibTeX] [PDF]
    [none]
    @article{kummerle14jfr,
      author    = {K{\"u}mmerle, Rainer and Ruhnke, Michael and Steder, Bastian and Stachniss, Cyrill and Burgard, Wolfram},
      title     = {Autonomous Robot Navigation in Highly Populated Pedestrian Zones},
      journal   = jfr,
      year      = {2014},
      doi       = {10.1002/rob.21534},
      timestamp = {2015.01.22},
      url       = {https://ais.informatik.uni-freiburg.de/publications/papers/kuemmerle14jfr.pdf},
    }

  • A. Kicherer, R. Roscher, K. Herzog, W. Förstner, and R. Töpfer, “Image based Evaluation for the Detection of Cluster Parameters in Grapevine,” in Acta horticulturae, 2014.
    [BibTeX]
    @inproceedings{kicherer2014evaluation,
      author    = {Kicherer, A. and Roscher, R. and Herzog, K. and F\"orstner, W. and T\"opfer, R.},
      title     = {Image based Evaluation for the Detection of Cluster Parameters in Grapevine},
      booktitle = {Acta horticulturae},
      year      = {2014},
      owner     = {ribana},
      timestamp = {2016.06.20},
    }

  • L. Klingbeil, M. Nieuwenhuisen, J. Schneider, C. Eling, D. Droeschel, D. Holz, T. Läbe, W. Förstner, S. Behnke, and H. Kuhlmann, “Towards Autonomous Navigation of an UAV-based Mobile Mapping System,” in 4th International Conf. on Machine Control & Guidance, 2014, p. 136–147.
    [BibTeX] [PDF]

    For situations, where mapping is neither possible from high altitudes nor from the ground, we are developing an autonomous micro aerial vehicle able to fly at low altitudes in close vicinity of obstacles. This vehicle is based on a MikroKopterTM octocopter platform (maximum total weight: 5kg), and contains a dual frequency GPS board, an IMU, a compass, two stereo camera pairs with fisheye lenses, a rotating 3D laser scanner, 8 ultrasound sensors, a real-time processing unit, and a compact PC for on-board ego-motion estimation and obstacle detection for autonomous navigation. A high-resolution camera is used for the actual mapping task, where the environment is reconstructed in three dimensions from images, using a highly accurate bundle adjustment. In this contribution, we describe the sensor system setup and present results from the evaluation of several aspects of the different subsystems as well as initial results from flight tests.

    @inproceedings{klingbeil14mcg,
      author    = {Klingbeil, Lasse and Nieuwenhuisen, Matthias and Schneider, Johannes and Eling, Christian and Droeschel, David and Holz, Dirk and L\"abe, Thomas and F\"orstner, Wolfgang and Behnke, Sven and Kuhlmann, Heiner},
      title     = {Towards Autonomous Navigation of an UAV-based Mobile Mapping System},
      booktitle = {4th International Conf. on Machine Control \& Guidance},
      year      = {2014},
      pages     = {136--147},
      url       = {https://www.ipb.uni-bonn.de/pdfs/klingbeil14mcg.pdf},
      abstract  = {For situations, where mapping is neither possible from high altitudes nor from the ground, we are developing an autonomous micro aerial vehicle able to fly at low altitudes in close vicinity of obstacles. This vehicle is based on a MikroKopterTM octocopter platform (maximum total weight: 5kg), and contains a dual frequency GPS board, an IMU, a compass, two stereo camera pairs with fisheye lenses, a rotating 3D laser scanner, 8 ultrasound sensors, a real-time processing unit, and a compact PC for on-board ego-motion estimation and obstacle detection for autonomous navigation. A high-resolution camera is used for the actual mapping task, where the environment is reconstructed in three dimensions from images, using a highly accurate bundle adjustment. In this contribution, we describe the sensor system setup and present results from the evaluation of several aspects of the different subsystems as well as initial results from flight tests.},
    }

  • B. Mack, R. Roscher, and B. Waske, “Can I trust my one-class classification?,” Remote Sensing, vol. 6, iss. 9, p. 8779–8802, 2014.
    [BibTeX] [PDF]

    Contrary to binary and multi-class classifiers, the purpose of a one-class classifier for remote sensing applications is to map only one specific land use/land cover class of interest. Training these classifiers exclusively requires reference data for the class of interest, while training data for other classes is not required. Thus, the acquisition of reference data can be significantly reduced. However, one-class classification is fraught with uncertainty and full automatization is difficult, due to the limited reference information that is available for classifier training. Thus, a user-oriented one-class classification strategy is proposed, which is based among others on the visualization and interpretation of the one-class classifier outcomes during the data processing. Careful interpretation of the diagnostic plots fosters the understanding of the classification outcome, e.g., the class separability and suitability of a particular threshold. In the absence of complete and representative validation data, which is the fact in the context of a real one-class classification application, such information is valuable for evaluation and improving the classification. The potential of the proposed strategy is demonstrated by classifying different crop types with hyperspectral data from Hyperion.

    @article{mack2014can,
      author    = {Mack, Benjamin and Roscher, Ribana and Waske, Bj{\"o}rn},
      title     = {Can I trust my one-class classification?},
      journal   = {Remote Sensing},
      year      = {2014},
      volume    = {6},
      number    = {9},
      pages     = {8779--8802},
      owner     = {ribana},
      timestamp = {2014.11.04},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Mack2014Can.pdf},
      abstract  = {Contrary to binary and multi-class classifiers, the purpose of a one-class classifier for remote sensing applications is to map only one specific land use/land cover class of interest. Training these classifiers exclusively requires reference data for the class of interest, while training data for other classes is not required. Thus, the acquisition of reference data can be significantly reduced. However, one-class classification is fraught with uncertainty and full automatization is difficult, due to the limited reference information that is available for classifier training. Thus, a user-oriented one-class classification strategy is proposed, which is based among others on the visualization and interpretation of the one-class classifier outcomes during the data processing. Careful interpretation of the diagnostic plots fosters the understanding of the classification outcome, e.g., the class separability and suitability of a particular threshold. In the absence of complete and representative validation data, which is the fact in the context of a real one-class classification application, such information is valuable for evaluation and improving the classification. The potential of the proposed strategy is demonstrated by classifying different crop types with hyperspectral data from Hyperion.},
    }

  • M. Mazuran, G. D. Tipaldi, L. Spinello, W. Burgard, and C. Stachniss, “A Statistical Measure for Map Consistency in SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    @inproceedings{mazuran2014icra,
      author    = {M. Mazuran and G.D. Tipaldi and L. Spinello and W. Burgard and C. Stachniss},
      title     = {A Statistical Measure for Map Consistency in SLAM},
      booktitle = icra,
      year      = {2014},
      address   = {Hong Kong, China},
      timestamp = {2014.04.24},
    }

  • T. Naseer, L. Spinello, W. Burgard, and C. Stachniss, “Robust Visual Robot Localization Across Seasons using Network Flows,” in Proc. of the National Conf. on Artificial Intellience (AAAI), 2014.
    [BibTeX] [PDF]
    [none]
    @inproceedings{naseer2014aaai,
      author    = {Naseer, T. and Spinello, L. and Burgard, W. and Stachniss, C.},
      title     = {Robust Visual Robot Localization Across Seasons using Network Flows},
      booktitle = aaai,
      year      = {2014},
      timestamp = {2014.05.12},
    }

  • F. Nenci, L. Spinello, and C. Stachniss, “Effective Compression of Range Data Streams for Remote Robot Operations using H.264,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2014.
    [BibTeX] [PDF]
    @inproceedings{nenci2014iros,
      author    = {Fabrizio Nenci and Luciano Spinello and Cyrill Stachniss},
      title     = {Effective Compression of Range Data Streams for Remote Robot Operations using H.264},
      booktitle = iros,
      year      = {2014},
    }

  • S. Oßwald, H. Kretzschmar, W. Burgard, and C. Stachniss, “Learning to Give Route Directions from Human Demonstrations,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Hong Kong, China, 2014.
    [BibTeX] [PDF]
    @inproceedings{osswald2014icra,
      author    = {S. O{\ss}wald and H. Kretzschmar and W. Burgard and C. Stachniss},
      title     = {Learning to Give Route Directions from Human Demonstrations},
      booktitle = icra,
      year      = {2014},
      address   = {Hong Kong, China},
    }

  • R. Roscher, K. Herzog, A. Kunkel, A. Kicherer, R. Töpfer, and W. Förstner, “Automated image analysis framework for high-throughput determination of grapevine berry sizes using conditional random fields,” Computers and Electronics in Agriculture, vol. 100, p. 148–158, 2014. doi:10.1016/j.compag.2013.11.008
    [BibTeX]
    @article{roscher2014automated,
      author    = {Roscher, Ribana and Herzog, Katja and Kunkel, Annemarie and Kicherer, Anna and T{\"o}pfer, Reinhard and F{\"o}rstner, Wolfgang},
      title     = {Automated image analysis framework for high-throughput determination of grapevine berry sizes using conditional random fields},
      journal   = {Computers and Electronics in Agriculture},
      year      = {2014},
      volume    = {100},
      pages     = {148--158},
      publisher = {Elsevier},
      doi       = {10.1016/j.compag.2013.11.008},
    }

  • R. Roscher and B. Waske, “Shapelet-based sparse image representation for landcover classification of hyperspectral data,” in IAPR Workshop on Pattern Recognition in Remote Sensing, 2014, p. 1–6.
    [BibTeX] [PDF]

    This paper presents a novel sparse representation-based classifier for landcover mapping of hyperspectral image data. Each image patch is factorized into segmentation patterns, also called shapelets, and patch-specific spectral features. The combination of both is represented in a patch-specific spatial-spectral dictionary, which is used for a sparse coding procedure for the reconstruction and classification of image patches. Hereby, each image patch is sparsely represented by a linear combination of elements out of the dictionary. The set of shapelets is specifically learned for each image in an unsupervised way in order to capture the image structure. The spectral features are assumed to be the training data. The experiments show that the proposed approach shows superior results in comparison to sparse-representation based classifiers that use no or only limited spatial information and behaves competitive or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse representation-based classifiers.

    @inproceedings{roscher2014shapelet,
      author    = {Roscher, Ribana and Waske, Bj{\"o}rn},
      title     = {Shapelet-based sparse image representation for landcover classification of hyperspectral data},
      booktitle = {IAPR Workshop on Pattern Recognition in Remote Sensing},
      year      = {2014},
      pages     = {1--6},
      owner     = {ribana},
      timestamp = {2014.11.04},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2014Shapelet.pdf},
      abstract  = {This paper presents a novel sparse representation-based classifier for landcover mapping of hyperspectral image data. Each image patch is factorized into segmentation patterns, also called shapelets, and patch-specific spectral features. The combination of both is represented in a patch-specific spatial-spectral dictionary, which is used for a sparse coding procedure for the reconstruction and classification of image patches. Hereby, each image patch is sparsely represented by a linear combination of elements out of the dictionary. The set of shapelets is specifically learned for each image in an unsupervised way in order to capture the image structure. The spectral features are assumed to be the training data. The experiments show that the proposed approach shows superior results in comparison to sparse-representation based classifiers that use no or only limited spatial information and behaves competitive or better than state-of-the-art classifiers utilizing spatial information and kernelized sparse representation-based classifiers.},
    }

  • R. Roscher and B. Waske, “Superpixel-based classification of hyperspectral data using sparse representation and conditional random fields,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2014.
    [BibTeX] [PDF]

    This paper presents a superpixel-based classifier for landcover mapping of hyperspectral image data. The approach relies on the sparse representation of each pixel by a weighted linear combination of the training data. Spatial information is incorporated by using a coarse patch-based neighborhood around each pixel as well as data-adapted superpixels. The classification is done via a hierarchical conditional random field, which utilizes the sparse-representation output and models spatial and hierarchical structures in the hyperspectral image. The experiments show that the proposed approach results in superior accuracies in comparison to sparse-representation based classifiers that solely use a patch-based neighborhood.

    @inproceedings{roscher2014superpixel,
      author    = {Roscher, Ribana and Waske, Bj{\"o}rn},
      title     = {Superpixel-based classification of hyperspectral data using sparse representation and conditional random fields},
      booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium (IGARSS)},
      year      = {2014},
      owner     = {ribana},
      timestamp = {2014.11.04},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2014Superpixel.pdf},
      abstract  = {This paper presents a superpixel-based classifier for landcover mapping of hyperspectral image data. The approach relies on the sparse representation of each pixel by a weighted linear combination of the training data. Spatial information is incorporated by using a coarse patch-based neighborhood around each pixel as well as data-adapted superpixels. The classification is done via a hierarchical conditional random field, which utilizes the sparse-representation output and models spatial and hierarchical structures in the hyperspectral image. The experiments show that the proposed approach results in superior accuracies in comparison to sparse-representation based classifiers that solely use a patch-based neighborhood.},
    }

  • J. Schneider and W. Förstner, “Real-time Accurate Geo-localization of a MAV with Omnidirectional Visual Odometry and GPS,” in Computer Vision – ECCV 2014 Workshops, 2014, p. 271–282. doi:10.1007/978-3-319-16178-5_18
    [BibTeX] [PDF]

    This paper presents a system for direct geo-localization of a MAV in an unknown environment using visual odometry and precise real time kinematic (RTK) GPS information. Visual odometry is performed with a multi-camera system with four fisheye cameras that cover a wide field of view which leads to better constraints for localization due to long tracks and a better intersection geometry. Visual observations from the acquired image sequences are refined with a high accuracy on selected keyframes by an incremental bundle adjustment using the iSAM2 algorithm. The optional integration of GPS information yields long-time stability and provides a direct geo-referenced solution. Experiments show the high accuracy which is below 3 cm standard deviation in position.

    @inproceedings{schneider14eccv-ws,
      author    = {J. Schneider and W. F\"orstner},
      title     = {Real-time Accurate Geo-localization of a MAV with Omnidirectional Visual Odometry and GPS},
      booktitle = {Computer Vision -- ECCV 2014 Workshops},
      year      = {2014},
      pages     = {271--282},
      doi       = {10.1007/978-3-319-16178-5_18},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schneider14eccv-ws.pdf},
      abstract  = {This paper presents a system for direct geo-localization of a MAV in an unknown environment using visual odometry and precise real time kinematic (RTK) GPS information. Visual odometry is performed with a multi-camera system with four fisheye cameras that cover a wide field of view which leads to better constraints for localization due to long tracks and a better intersection geometry. Visual observations from the acquired image sequences are refined with a high accuracy on selected keyframes by an incremental bundle adjustment using the iSAM2 algorithm. The optional integration of GPS information yields long-time stability and provides a direct geo-referenced solution. Experiments show the high accuracy which is below 3 cm standard deviation in position.},
    }

  • J. Schneider, T. Läbe, and W. Förstner, “Real-Time Bundle Adjustment with an Omnidirectional Multi-Camera System and GPS,” in Proc. of the 4th International Conf. on Machine Control & Guidance, 2014, p. 98–103.
    [BibTeX] [PDF]

    In this paper we present our system for visual odometry that performs a fast incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. It is applicable to image streams of a calibrated multi-camera system with omnidirectional cameras. In this paper we use an autonomously flying octocopter that is equipped for visual odometry and obstacle detection with four fisheye cameras, which provide a large field of view. For real-time ego-motion estimation the platform is equipped, besides the cameras, with a dual frequency GPS board, an IMU and a compass. In this paper we show how we apply our system for visual odometry using the synchronized video streams of the four fisheye cameras. The position and orientation information from the GPS-unit and the inertial sensors can optionally be integrated into our system. We will show the obtained accuracy of pure odometry and compare it with the solution from GPS/INS.

    @inproceedings{schneider14mcg,
      author    = {J. Schneider and T. L\"abe and W. F\"orstner},
      title     = {Real-Time Bundle Adjustment with an Omnidirectional Multi-Camera System and GPS},
      booktitle = {Proc. of the 4th International Conf. on Machine Control \& Guidance},
      year      = {2014},
      pages     = {98--103},
      address   = {Braunschweig},
      url       = {https://www.ipb.uni-bonn.de/pdfs/schneider14mcg.pdf},
      abstract  = {In this paper we present our system for visual odometry that performs a fast incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. It is applicable to image streams of a calibrated multi-camera system with omnidirectional cameras. In this paper we use an autonomously flying octocopter that is equipped for visual odometry and obstacle detection with four fisheye cameras, which provide a large field of view. For real-time ego-motion estimation the platform is equipped, besides the cameras, with a dual frequency GPS board, an IMU and a compass. In this paper we show how we apply our system for visual odometry using the synchronized video streams of the four fisheye cameras. The position and orientation information from the GPS-unit and the inertial sensors can optionally be integrated into our system. We will show the obtained accuracy of pure odometry and compare it with the solution from GPS/INS.},
    }

  • C. Stachniss and W. Burgard, “Particle Filters for Robot Navigation,” Foundations and Trends in Robotics, vol. 3, iss. 4, pp. 211-282, 2014. doi:10.1561/2300000013
    [BibTeX] [PDF]
    [none]
    @article{stachniss2014,
      author    = {C. Stachniss and W. Burgard},
      title     = {Particle Filters for Robot Navigation},
      journal   = fntr,
      year      = {2014},
      volume    = {3},
      number    = {4},
      pages     = {211--282},
      note      = {Written in 2012, published in 2014},
      doi       = {10.1561/2300000013},
      timestamp = {2014.04.24},
      url       = {https://www.nowpublishers.com/articles/foundations-and-trends-in-robotics/ROB-013},
    }

  • J. Stefanski, O. Chaskovskyy, and B. Waske, “Mapping and monitoring of land use changes in post-Soviet western Ukraine using remote sensing data,” Applied Geography, vol. 55, p. 155–164, 2014. doi:10.1016/j.apgeog.2014.08.003
    [BibTeX]

    While agriculture is expanded and intensified in many parts of the world, decreases in land use intensity and farmland abandonment take place in other parts. Eastern Europe experienced widespread changes of agricultural land use after the collapse of the Soviet Union in 1991, however, rates and patterns of these changes are still not well understood. Our objective was to map and analyze changes of land management regimes, including large-scale cropland, small-scale cropland, and abandoned farmland. Monitoring land management regimes is a promising avenue to better understand the temporal and spatial patterns of land use intensity changes. For mapping and change detection, we used an object-based approach with Superpixel segmentation for delineating objects and a Random Forest classifier. We applied this approach to Landsat and ERS SAR data for the years 1986, 1993, 1999, 2006, and 2010 to estimate change trajectories for this time period in western Ukraine. The first period during the 1990s was characterized by post-socialist transition processes including farmland abandonment and substantial subsistence agriculture. Later on, recultivation processes and the recurrence of industrial, large-scale farming were triggered by global food prices that have led to a growing interest in this region.

    @article{stefanski2014mapping2,
      author    = {Stefanski, Jan and Chaskovskyy, Oleh and Waske, Bj{\"o}rn},
      title     = {Mapping and monitoring of land use changes in post-Soviet western Ukraine using remote sensing data},
      journal   = {Applied Geography},
      year      = {2014},
      volume    = {55},
      pages     = {155--164},
      doi       = {10.1016/j.apgeog.2014.08.003},
      issn      = {0143-6228},
      abstract  = {While agriculture is expanded and intensified in many parts of the world, decreases in land use intensity and farmland abandonment take place in other parts. Eastern Europe experienced widespread changes of agricultural land use after the collapse of the Soviet Union in 1991, however, rates and patterns of these changes are still not well understood. Our objective was to map and analyze changes of land management regimes, including large-scale cropland, small-scale cropland, and abandoned farmland. Monitoring land management regimes is a promising avenue to better understand the temporal and spatial patterns of land use intensity changes. For mapping and change detection, we used an object-based approach with Superpixel segmentation for delineating objects and a Random Forest classifier. We applied this approach to Landsat and ERS SAR data for the years 1986, 1993, 1999, 2006, and 2010 to estimate change trajectories for this time period in western Ukraine. The first period during the 1990s was characterized by post-socialist transition processes including farmland abandonment and substantial subsistence agriculture. Later on, recultivation processes and the recurrence of industrial, large-scale farming were triggered by global food prices that have led to a growing interest in this region.},
    }

  • J. Stefanski, T. Kuemmerle, O. Chaskovskyy, P. Griffiths, V. Havryluk, J. Knorn, N. Korol, A. Sieber, and B. Waske, “Mapping Land Management Regimes in Western Ukraine Using Optical and SAR Data,” Remote Sensing, vol. 6, iss. 6, p. 5279–5305, 2014. doi:10.3390/rs6065279
    [BibTeX]

    The global demand for agricultural products is surging due to population growth, more meat-based diets, and the increasing role of bioenergy. Three strategies can increase agricultural production: (1) expanding agriculture into natural ecosystems; (2) intensifying existing farmland; or (3) recultivating abandoned farmland. Because agricultural expansion entails substantial environmental trade-offs, intensification and recultivation are currently gaining increasing attention. Assessing where these strategies may be pursued, however, requires improved spatial information on land use intensity, including where farmland is active and fallow. We developed a framework to integrate optical and radar data in order to advance the mapping of three farmland management regimes: (1) large-scale, mechanized agriculture; (2) small-scale, subsistence agriculture; and (3) fallow or abandoned farmland. We applied this framework to our study area in western Ukraine, a region characterized by marked spatial heterogeneity in management intensity due to the legacies from Soviet land management, the breakdown of the Soviet Union in 1991, and the recent integration of this region into world markets. We mapped land management regimes using a hierarchical, object-based framework. Image segmentation for delineating objects was performed by using the Superpixel Contour algorithm. We then applied Random Forest classification to map land management regimes and validated our map using randomly sampled in-situ data, obtained during an extensive field campaign. Our results showed that farmland management regimes were mapped reliably, resulting in a final map with an overall accuracy of 83.4%. Comparing our land management regimes map with a soil map revealed that most fallow land occurred on soils marginally suited for agriculture, but some areas within our study region contained considerable potential for recultivation. 
Overall, our study highlights the potential for an improved, more nuanced mapping of agricultural land use by combining imagery of different sensors.

    @article{stefanski2014mapping,
      author    = {Stefanski, Jan and Kuemmerle, Tobias and Chaskovskyy, Oleh and Griffiths, Patrick and Havryluk, Vassiliy and Knorn, Jan and Korol, Nikolas and Sieber, Anika and Waske, Bj{\"o}rn},
      title     = {Mapping Land Management Regimes in Western Ukraine Using Optical and SAR Data},
      journal   = {Remote Sensing},
      year      = {2014},
      volume    = {6},
      number    = {6},
      pages     = {5279--5305},
      doi       = {10.3390/rs6065279},
      issn      = {2072-4292},
      owner     = {JanS},
      abstract  = {The global demand for agricultural products is surging due to population growth, more meat-based diets, and the increasing role of bioenergy. Three strategies can increase agricultural production: (1) expanding agriculture into natural ecosystems; (2) intensifying existing farmland; or (3) recultivating abandoned farmland. Because agricultural expansion entails substantial environmental trade-offs, intensification and recultivation are currently gaining increasing attention. Assessing where these strategies may be pursued, however, requires improved spatial information on land use intensity, including where farmland is active and fallow. We developed a framework to integrate optical and radar data in order to advance the mapping of three farmland management regimes: (1) large-scale, mechanized agriculture; (2) small-scale, subsistence agriculture; and (3) fallow or abandoned farmland. We applied this framework to our study area in western Ukraine, a region characterized by marked spatial heterogeneity in management intensity due to the legacies from Soviet land management, the breakdown of the Soviet Union in 1991, and the recent integration of this region into world markets. We mapped land management regimes using a hierarchical, object-based framework. Image segmentation for delineating objects was performed by using the Superpixel Contour algorithm. We then applied Random Forest classification to map land management regimes and validated our map using randomly sampled in-situ data, obtained during an extensive field campaign. Our results showed that farmland management regimes were mapped reliably, resulting in a final map with an overall accuracy of 83.4\%. Comparing our land management regimes map with a soil map revealed that most fallow land occurred on soils marginally suited for agriculture, but some areas within our study region contained considerable potential for recultivation. Overall, our study highlights the potential for an improved, more nuanced mapping of agricultural land use by combining imagery of different sensors.},
    }

  • O. Vysotska, B. Frank, I. Ulbert, O. Paul, P. Ruther, C. Stachniss, and W. Burgard, “Automatic Channel Selection and Neural Signal Estimation across Channels of Neural Probes,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Chicago, USA, 2014.
    [BibTeX] [PDF]
    @inproceedings{vysotska2014iros,
      author    = {O. Vysotska and B. Frank and I. Ulbert and O. Paul and P. Ruther and C. Stachniss and W. Burgard},
      title     = {Automatic Channel Selection and Neural Signal Estimation across Channels of Neural Probes},
      booktitle = iros,
      year      = {2014},
      address   = {Chicago, USA},
    }

  • V. A. Ziparo, G. Castelli, L. Van Gool, G. Grisetti, B. Leibe, M. Proesmans, and C. Stachniss, “The ROVINA Project. Robots for Exploration, Digital Preservation and Visualization of Archeological sites,” in Proc. of the 18th ICOMOS General Assembly and Scientific Symposium “Heritage and Landscape as Human Values”, 2014.
    [BibTeX]
    [none]
    @inproceedings{ziparo14icomosga,
      author    = {Ziparo, V.A. and Castelli, G. and Van Gool, L. and Grisetti, G. and Leibe, B. and Proesmans, M. and Stachniss, C.},
      title     = {The ROVINA Project. Robots for Exploration, Digital Preservation and Visualization of Archeological sites},
      booktitle = {Proc. of the 18th ICOMOS General Assembly and Scientific Symposium ``Heritage and Landscape as Human Values''},
      year      = {2014},
      timestamp = {2015.03.02},
    }

2013

  • N. Abdo, H. Kretzschmar, L. Spinello, and C. Stachniss, “Learning Manipulation Actions from a Few Demonstrations,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{abdo2013,
      author    = {N. Abdo and H. Kretzschmar and L. Spinello and C. Stachniss},
      title     = {Learning Manipulation Actions from a Few Demonstrations},
      booktitle = icra,
      year      = {2013},
      address   = {Karlsruhe, Germany},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/abdo13icra.pdf},
    }

  • P. Agarwal, G. D. Tipaldi, L. Spinello, C. Stachniss, and W. Burgard, “Dynamic Covariance Scaling for Robust Robotic Mapping,” in ICRA Workshop on robust and Multimodal Inference in Factor Graphs, Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{agarwal2013,
      author    = {P. Agarwal and G.D. Tipaldi and L. Spinello and C. Stachniss and W. Burgard},
      title     = {Dynamic Covariance Scaling for Robust Robotic Mapping},
      booktitle = {ICRA Workshop on Robust and Multimodal Inference in Factor Graphs},
      year      = {2013},
      address   = {Karlsruhe, Germany},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/agarwal13icraws.pdf},
    }

  • P. Agarwal, G. D. Tipaldi, L. Spinello, C. Stachniss, and W. Burgard, “Robust Map Optimization using Dynamic Covariance Scaling,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{agarwal2013a,
      author    = {P. Agarwal and G.D. Tipaldi and L. Spinello and C. Stachniss and W. Burgard},
      title     = {Robust Map Optimization using Dynamic Covariance Scaling},
      booktitle = icra,
      address   = {Karlsruhe, Germany},
      year      = {2013},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/agarwal13icra.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • K. Böhm, “Tiefenbildsegmentierung mit Hilfe geodätischer Distanztransformation,” Bachelor Thesis, 2013.
    [BibTeX]
    [none]
    @MastersThesis{bohm2013,
    title = {Tiefenbildsegmentierung mit Hilfe geod\"atischer Distanztransformation},
    author = {B\"ohm, Karsten},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2013},
    type = {bachelor thesis},
    abstract = {[none]},
    timestamp = {2014.01.20},
    }

  • A. Barth, J. Siegemund, and J. Schwehr, “Fast and precise localization at stop intersections,” in Intelligent Vehicles Symposium Workshops (IV Workshops), Gold Coast, Australia, 2013, p. 75–80.
    [BibTeX] [PDF]

    This article presents a practical solution for fast and precise localization of a vehicle’s position and orientation with respect to stop sign controlled intersections based on video sequences and mapped data. It consists of two steps. First, an intersection map is generated offline based on street-level imagery and GPS data, collected by a vehicle driving through an intersection from different directions. The map contains both landmarks for localization and information about stop line positions. This information is used in the second step to precisely and efficiently derive a vehicle’s pose in real-time when approaching a mapped intersection. At this point, we only need coarse GPS information to be able to load the proper map data.

    @InProceedings{barth2013fast,
    title = {Fast and precise localization at stop intersections},
    author = {Barth, Alexander and Siegemund, Jan and Schwehr, Julian},
    booktitle = {Intelligent Vehicles Symposium Workshops (IV Workshops)},
    year = {2013},
    address = {Gold Coast, Australia},
    pages = {75--80},
    publisher = {IEEE},
    abstract = {This article presents a practical solution for fast and precise localization of a vehicle's position and orientation with respect to stop sign controlled intersections based on video sequences and mapped data. It consists of two steps. First, an intersection map is generated offline based on street-level imagery and GPS data, collected by a vehicle driving through an intersection from different directions. The map contains both landmarks for localization and information about stop line positions. This information is used in the second step to precisely and efficiently derive a vehicle's pose in real-time when approaching a mapped intersection. At this point, we only need coarse GPS information to be able to load the proper map data.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Barth2013Fast.pdf},
    }

  • I. Bogoslavskyi, O. Vysotska, J. Serafin, G. Grisetti, and C. Stachniss, “Efficient Traversability Analysis for Mobile Robots using the Kinect Sensor,” in Proc. of the European Conf. on Mobile Robots (ECMR), Barcelona, Spain, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{bogoslavskyi2013,
      author    = {I. Bogoslavskyi and O. Vysotska and J. Serafin and G. Grisetti and C. Stachniss},
      title     = {Efficient Traversability Analysis for Mobile Robots using the Kinect Sensor},
      booktitle = ecmr,
      address   = {Barcelona, Spain},
      year      = {2013},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/bogoslavskyi13ecmr.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • W. Burgard and C. Stachniss, “Gestatten, Obelix!,” Forschung – Das Magazin der Deutschen Forschungsgemeinschaft, vol. 1, 2013.
    [BibTeX] [PDF]
    [none]
    @article{burgard2013,
      author    = {W. Burgard and C. Stachniss},
      title     = {Gestatten, Obelix!},
      journal   = {Forschung -- Das Magazin der Deutschen Forschungsgemeinschaft},
      volume    = {1},
      year      = {2013},
      note      = {In German, invited},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/forschung_2013_01-pg4-9.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • D. Chai, W. Förstner, and F. Lafarge, “Recovering Line-Networks in Images by Junction-Point Processes,” in Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition, 2013, pp. 1894-1901. doi:10.1109/CVPR.2013.247
    [BibTeX] [PDF]
    [none]
    @InProceedings{chai13recovering,
    title = {Recovering Line-Networks in Images by Junction-Point Processes},
    author = {D. Chai and W. F\"orstner and F. Lafarge},
    booktitle = {Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition},
    year = {2013},
    pages = {1894--1901},
    abstract = {[none]},
    doi = {10.1109/CVPR.2013.247},
    timestamp = {2015.07.14},
    url = {https://www.ipb.uni-bonn.de/pdfs/chai13recovering.pdf},
    }

  • T. Dickscheid and W. Förstner, “A Trainable Markov Random Field for Low-Level Image Feature Matching with Spatial Relationships,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 269–284, 2013. doi:10.1127/1432-8364/2013/0176
    [BibTeX]

    Many vision applications rely on local features for image analysis, notably in the areas of object recognition, image registration and camera calibration. One important example in photogrammetry are fully automatic algorithms for relative image orientation. Such applications rely on a matching algorithm to extract a sufficient number of correct feature correspondences at acceptable outlier rates, which is most often based on the similarity of feature descriptions. When the number of detected features is low, it is advisable to use multiple feature detectors with complementary properties. When feature similarity is not sufficient for matching, spatial feature relationships provide valuable information. In this work, a highly generic matching algorithm is proposed which is based on a trainable Markov random field (MRF). It is able to incorporate almost arbitrary combinations of features, similarity measures and pairwise spatial relationships, and has a clear statistical interpretation. A major novelty is its ability to compensate for weaknesses in one information cue by implicitely exploiting the strengths of others.

    @article{dickscheid2013trainable,
      author   = {Dickscheid, Timo and F\"orstner, Wolfgang},
      title    = {A Trainable Markov Random Field for Low-Level Image Feature Matching with Spatial Relationships},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      volume   = {4},
      pages    = {269--284},
      year     = {2013},
      doi      = {10.1127/1432-8364/2013/0176},
      abstract = { Many vision applications rely on local features for image analysis, notably in the areas of object recognition, image registration and camera calibration. One important example in photogrammetry are fully automatic algorithms for relative image orientation. Such applications rely on a matching algorithm to extract a sufficient number of correct feature correspondences at acceptable outlier rates, which is most often based on the similarity of feature descriptions. When the number of detected features is low, it is advisable to use multiple feature detectors with complementary properties. When feature similarity is not sufficient for matching, spatial feature relationships provide valuable information. In this work, a highly generic matching algorithm is proposed which is based on a trainable Markov random field (MRF). It is able to incorporate almost arbitrary combinations of features, similarity measures and pairwise spatial relationships, and has a clear statistical interpretation. A major novelty is its ability to compensate for weaknesses in one information cue by implicitely exploiting the strengths of others. },
    }

  • W. Förstner, “Graphical Models in Geodesy and Photogrammetry,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 255–268, 2013. doi:10.1127/1432-8364/2013/0175
    [BibTeX]

    The paper gives an introduction into graphical models and their use in specifying stochastic models in geodesy and photogrammetry. Basic task in adjustment theory can intuitively be described and analysed using graphical models. The paper shows that geodetic networks and bundle adjustments can be interpreted as graphical models, both as Bayesian networks or as conditional random fields. Especially hidden Markov random fields and conditional random fields are demonstrated to be versatile models for parameter estimation and classification.

    @article{foerstner2013graphical,
      author   = {F\"orstner, Wolfgang},
      title    = {Graphical Models in Geodesy and Photogrammetry},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      volume   = {4},
      pages    = {255--268},
      year     = {2013},
      doi      = {10.1127/1432-8364/2013/0175},
      abstract = { The paper gives an introduction into graphical models and their use in specifying stochastic models in geodesy and photogrammetry. Basic task in adjustment theory can intuitively be described and analysed using graphical models. The paper shows that geodetic networks and bundle adjustments can be interpreted as graphical models, both as Bayesian networks or as conditional random fields. Especially hidden Markov random fields and conditional random fields are demonstrated to be versatile models for parameter estimation and classification. },
    }

  • W. Förstner, “Photogrammetrische Forschung – Eine Zwischenbilanz aus Bonner Sicht,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 251–254, 2013. doi:10.1127/1432-8364/2013/0186
    [BibTeX]

    Photogrammetrische Forschung – Eine Zwischenbilanz aus Bonner Sicht

    @article{foerstner2013photogrammetrische,
      author   = {F\"orstner, Wolfgang},
      title    = {Photogrammetrische Forschung - Eine Zwischenbilanz aus Bonner Sicht},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      volume   = {4},
      pages    = {251--254},
      year     = {2013},
      doi      = {10.1127/1432-8364/2013/0186},
      abstract = {Photogrammetrische Forschung - Eine Zwischenbilanz aus Bonner Sicht},
    }

  • A. Hornung, K. M. Wurm, M. Bennewitz, C. Stachniss, and W. Burgard, “OctoMap: An Efficient Probabilistic 3D Mapping Framework Based on Octrees,” Autonomous Robots, vol. 34, pp. 189-206, 2013.
    [BibTeX] [PDF]
    [none]
    @Article{hornung2013,
    title = {{OctoMap}: An Efficient Probabilistic 3D Mapping Framework Based on Octrees},
    author = {A. Hornung and K.M. Wurm and M. Bennewitz and C. Stachniss and W. Burgard},
    journal = auro,
    year = {2013},
    pages = {189--206},
    volume = {34},
    number = {3},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/hornung13auro.pdf},
    }

  • R. Kümmerle, M. Ruhnke, B. Steder, C. Stachniss, and W. Burgard, “A Navigation System for Robots Operating in Crowded Urban Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Karlsruhe, Germany, 2013.
    [BibTeX] [PDF]
    [none]
    @inproceedings{kummerle2013,
      author    = {R. K\"ummerle and M. Ruhnke and B. Steder and C. Stachniss and W. Burgard},
      title     = {A Navigation System for Robots Operating in Crowded Urban Environments},
      booktitle = icra,
      address   = {Karlsruhe, Germany},
      year      = {2013},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kuemmerle13icra.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • A. Kicherer, R. Roscher, K. Herzog, S. Šimon, W. Förstner, and R. Töpfer, “BAT (Berry Analysis Tool): A high-throughput image interpretation tool to acquire the number, diameter, and volume of grapevine berries,” Vitis, vol. 52, iss. 3, pp. 129-135, 2013.
    [BibTeX]

    QTL-analysis (quantitative trait loci) and marker development rely on efficient phenotyping techniques. Objectivity and precision of a phenotypic data evaluation is crucial but time consuming. In the present study a high-throughput image interpretation tool was developed to acquire automatically number, size, and volume of grape berries from RGB (red-green-blue) images. Individual berries of one cluster were placed on a defined construction to take a RGB image from the top. The image interpretation of one dataset with an arbitrary number of images occurs automatically by starting the BAT (Berry-Analysis-Tool) developed in MATLAB. For validation of results, the number of berries was counted and their size was measured using a digital calliper. A measuring cylinder was used to determine reliably the berry volume by displacement of water. All placed berries could be counted by BAT 100 % correctly. Manual ratings compared with BAT ratings showed strong correlation of r = 0,964 for mean berry diameter/image and r = 0.984 for berry volume.

    @Article{kicherer2013,
    title = {BAT (Berry Analysis Tool): A high-throughput image interpretation tool to acquire the number, diameter, and volume of grapevine berries},
    author = {Kicherer, A. and Roscher, R. and Herzog, K. and {\vS}imon, S. and F\"orstner, W. and T\"opfer, R.},
    journal = {Vitis},
    year = {2013},
    number = {3},
    pages = {129--135},
    volume = {52},
    abstract = {QTL-analysis (quantitative trait loci) and marker development rely on efficient phenotyping techniques. Objectivity and precision of a phenotypic data evaluation is crucial but time consuming. In the present study a high-throughput image interpretation tool was developed to acquire automatically number, size, and volume of grape berries from RGB (red-green-blue) images. Individual berries of one cluster were placed on a defined construction to take a RGB image from the top. The image interpretation of one dataset with an arbitrary number of images occurs automatically by starting the BAT (Berry-Analysis-Tool) developed in MATLAB. For validation of results, the number of berries was counted and their size was measured using a digital calliper. A measuring cylinder was used to determine reliably the berry volume by displacement of water. All placed berries could be counted by BAT 100~\% correctly. Manual ratings compared with BAT ratings showed strong correlation of r~=~0,964 for mean berry diameter/image and r~=~0.984 for berry volume.},
    owner = {ribana1},
    timestamp = {2013.08.14},
    }

  • D. Maier, C. Stachniss, and M. Bennewitz, “Vision-Based Humanoid Navigation Using Self-Supervised Obstacle Detection,” The Intl. Journal of Humanoid Robotics (IJHR), vol. 10, 2013.
    [BibTeX] [PDF]
    [none]
    @Article{maier2013,
    title = {Vision-Based Humanoid Navigation Using Self-Supervised Obstacle Detection},
    author = {D. Maier and C. Stachniss and M. Bennewitz},
    journal = ijhr,
    year = {2013},
    volume = {10},
    number = {2},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/maier13ijhr.pdf},
    }

  • M. Nieuwenhuisen, D. Droeschel, J. Schneider, D. Holz, T. Läbe, and S. Behnke, “Multimodal Obstacle Detection and Collision Avoidance for Micro Aerial Vehicles,” in Proc. of the 6th European Conf. on Mobile Robots (ECMR), 2013. doi:10.1109/ECMR.2013.6698812
    [BibTeX] [PDF]

    Reliably perceiving obstacles and avoiding collisions is key for the fully autonomous application of micro aerial vehicles (MAVs). Limiting factors for increasing autonomy and complexity of MAVs (without external sensing and control) are limited onboard sensing and limited onboard processing power. In this paper, we propose a complete system with a multimodal sensor setup for omnidirectional obstacle perception. We developed a lightweight 3D laser scanner setup and visual obstacle detection using wide-angle stereo cameras. Together with our fast reactive collision avoidance approach based on local egocentric grid maps of the environment we aim at safe operation in the vicinity of structures like buildings or vegetation.

    @InProceedings{nieuwenhuisen13ecmr,
    title = {Multimodal Obstacle Detection and Collision Avoidance for Micro Aerial Vehicles},
    author = {Nieuwenhuisen, Matthias and Droeschel, David and Schneider, Johannes and Holz, Dirk and L\"abe, Thomas and Behnke, Sven},
    booktitle = {Proc. of the 6th European Conf. on Mobile Robots (ECMR)},
    year = {2013},
    address = {Barcelona, Spain},
    abstract = {Reliably perceiving obstacles and avoiding collisions is key for the fully autonomous application of micro aerial vehicles (MAVs). Limiting factors for increasing autonomy and complexity of MAVs (without external sensing and control) are limited onboard sensing and limited onboard processing power. In this paper, we propose a complete system with a multimodal sensor setup for omnidirectional obstacle perception. We developed a lightweight 3D laser scanner setup and visual obstacle detection using wide-angle stereo cameras. Together with our fast reactive collision avoidance approach based on local egocentric grid maps of the environment we aim at safe operation in the vicinity of structures like buildings or vegetation.},
    doi = {10.1109/ECMR.2013.6698812},
    url = {https://www.ais.uni-bonn.de/papers/ECMR_2013_Nieuwenhuisen_Multimodal_Obstacle_Avoidance.pdf},
    }

  • J. C. Rose, “Automatische Lokalisierung einer Drohne in einer Karte,” Master Thesis, 2013.
    [BibTeX]

    \textbf{Summary} The number of scientific contributions dealing with automatic vision based localization of mobile robots is significant. For a long time contributions have focused on mobile ground robots almost solely but with the new availability of civilly useable UAVs (Unmanned Aerial Vehicles) an interest in adapting the known methods for airworthy vehicles has risen. This work deals with developing a program system called LOCALZE for determining a full 6DOF (Degree of Freedom) position of an UAV in a metric map using vision whereby the metric map is constituted landmarks derived of SIFT-points. Position determination is reached over solving the correspondence problem between SIFT-points detected in the current image of the vision sensor and the landmarks. The potential of LOCALIZE concerning precision and accuracy of the determined position is evaluated in empirical studies using two vision sensors. Experiments demonstrate a dependency of the precision from the quality of the vision sensor. When using a high quality sensor a point error in position determination of about 1-3 cm and an accuracy of 1-7 cm can be reached. \textbf{Zusammenfassung} Die Anzahl wissenschaftlicher Beiträge zur automatischen Lokalisierung mobiler Roboter mittels Bildsensoren ist beträchtlich. Viele Beiträge fokussierten sich dabei lange auf die Untersuchung bodenbeschränkter Roboter. Im Laufe der letzten Jahre wuchs jedoch die Bedeutung der UAVs (Unmanned Aerial Vehicle) auch für zivile Anwendungen und damit das Interesse an einer Adaption der bisherigen Methoden an flugfähigen Robotern. In dieser Arbeit wird ein Programmsystem LOCALIZE für die 3D-Positionsbestimmung (mit allen 6 Freiheitsgraden) eines UAV mittels eines optischen Systems entworfen und sein Potential in empirischen Testszenarien evaluiert. 
Die Positionierung des Roboters geschieht dabei innerhalb einer a priori erstellten metrischen Karte eines Innenraums, die sich aus über den SIFT-Algorithmus abgeleiteten Landmarken konstituiert. Die Lokalisierung geschieht über Korrespondenzfindung zwischen den im aktuellen Bild des Roboters extrahierten und den Landmarken. Anhand der korrespondierenden Punkte in beiden Systemen wird ein iterativer Räumlicher Rückwärtsschnitt zur Positionsbestimmung verwendet. LOCALIZE wird anhand zweier Bildsensoren hinsichtlich potentieller Präzision und Richtigkeit der Positionsbestimmung untersucht. Die Experimente demonstrieren eine Abhängigkeit der Präzision von der Qualität des Bildsensors. Bei Verwendung eines hochwertigen Bildsensors kann ein Punktfehler der Positionierung von rund 1-3 cm und eine Richtigkeit von 1-7 cm erreicht werden.

    % NOTE(review): the English abstract says "LOCALZE" once where the rest of the
    % entry says "LOCALIZE" -- looks like a typo carried over from the thesis
    % abstract itself; kept verbatim, confirm against the original document.
    @MastersThesis{rose2013automatische,
    title = {Automatische Lokalisierung einer Drohne in einer Karte},
    author = {Rose, Johann Christian},
    school = {University of Bonn},
    year = {2013},
    note = {Betreuung: Prof. Dr. Bj\"orn Waske, Johannes Schneider},
    abstract = {\textbf{Summary} The number of scientific contributions dealing with automatic vision based localization of mobile robots is significant. For a long time contributions have focused on mobile ground robots almost solely but with the new availability of civilly useable UAVs (Unmanned Aerial Vehicles) an interest in adapting the known methods for airworthy vehicles has risen. This work deals with developing a program system called LOCALZE for determining a full 6DOF (Degree of Freedom) position of an UAV in a metric map using vision whereby the metric map is constituted landmarks derived of SIFT-points. Position determination is reached over solving the correspondence problem between SIFT-points detected in the current image of the vision sensor and the landmarks. The potential of LOCALIZE concerning precision and accuracy of the determined position is evaluated in empirical studies using two vision sensors. Experiments demonstrate a dependency of the precision from the quality of the vision sensor. When using a high quality sensor a point error in position determination of about 1-3 cm and an accuracy of 1-7 cm can be reached. \textbf{Zusammenfassung} Die Anzahl wissenschaftlicher Beitr\"age zur automatischen Lokalisierung mobiler Roboter mittels Bildsensoren ist betr\"achtlich. Viele Beitr\"age fokussierten sich dabei lange auf die Untersuchung bodenbeschr\"ankter Roboter. Im Laufe der letzten Jahre wuchs jedoch die Bedeutung der UAVs (Unmanned Aerial Vehicle) auch f\"ur zivile Anwendungen und damit das Interesse an einer Adaption der bisherigen Methoden an flugf\"ahigen Robotern. In dieser Arbeit wird ein Programmsystem LOCALIZE f\"ur die 3D-Positionsbestimmung (mit allen 6 Freiheitsgraden) eines UAV mittels eines optischen Systems entworfen und sein Potential in empirischen Testszenarien evaluiert. 
Die Positionierung des Roboters geschieht dabei innerhalb einer a priori erstellten metrischen Karte eines Innenraums, die sich aus \"uber den SIFT-Algorithmus abgeleiteten Landmarken konstituiert. Die Lokalisierung geschieht \"uber Korrespondenzfindung zwischen den im aktuellen Bild des Roboters extrahierten und den Landmarken. Anhand der korrespondierenden Punkte in beiden Systemen wird ein iterativer R\"aumlicher R\"uckw\"artsschnitt zur Positionsbestimmung verwendet. LOCALIZE wird anhand zweier Bildsensoren hinsichtlich potentieller Pr\"azision und Richtigkeit der Positionsbestimmung untersucht. Die Experimente demonstrieren eine Abh\"angigkeit der Pr\"azision von der Qualit\"at des Bildsensors. Bei Verwendung eines hochwertigen Bildsensors kann ein Punktfehler der Positionierung von rund 1-3 cm und eine Richtigkeit von 1-7 cm erreicht werden.},
    city = {Bonn},
    }

  • S. Schallenberg, “Erfassung des Landbedeckungswandels im Rheinischen Braunkohlerevier mittels Landsat-Satellitendaten,” Bachelor Thesis, 2013.
    [BibTeX]
    [none]
    @MastersThesis{schallenberg2013,
    title = {Erfassung des Landbedeckungswandels im Rheinischen Braunkohlerevier mittels Landsat-Satellitendaten},
    author = {Schallenberg, Sebastian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2013},
    note = {Betreuung: Prof. Dr. Bj\"orn Waske, M.Sc. Jan Stefanski},
    type = {bachelor thesis},
    abstract = {[none]},
    timestamp = {2014.01.20},
    }

  • F. Schindler, “Man-Made Surface Structures from Triangulated Point-Clouds,” PhD Thesis, 2013.
    [BibTeX] [PDF]

    Photogrammetry aims at reconstructing shape and dimensions of objects captured with cameras, 3D laser scanners or other spatial acquisition systems. While many acquisition techniques deliver triangulated point clouds with millions of vertices within seconds, the interpretation is usually left to the user. Especially when reconstructing man-made objects, one is interested in the underlying surface structure, which is not inherently present in the data. This includes the geometric shape of the object, e.g. cubical or cylindrical, as well as corresponding surface parameters, e.g. width, height and radius. Applications are manifold and range from industrial production control to architectural on-site measurements to large-scale city models. The goal of this thesis is to automatically derive such surface structures from triangulated 3D point clouds of man-made objects. They are defined as a compound of planar or curved geometric primitives. Model knowledge about typical primitives and relations between adjacent pairs of them should affect the reconstruction positively. After formulating a parametrized model for man-made surface structures, we develop a reconstruction framework with three processing steps: During a fast pre-segmentation exploiting local surface properties we divide the given surface mesh into planar regions. Making use of a model selection scheme based on minimizing the description length, this surface segmentation is free of control parameters and automatically yields an optimal number of segments. A subsequent refinement introduces a set of planar or curved geometric primitives and hierarchically merges adjacent regions based on their joint description length. A global classification and constraint parameter estimation combines the data-driven segmentation with high-level model knowledge. 
Therefore, we represent the surface structure with a graphical model and formulate factors based on likelihood as well as prior knowledge about parameter distributions and class probabilities. We infer the most probable setting of surface and relation classes with belief propagation and estimate an optimal surface parametrization with constraints induced by inter-regional relations. The process is specifically designed to work on noisy data with outliers and a few exceptional freeform regions not describable with geometric primitives. It yields full 3D surface structures with watertightly connected surface primitives of different types. The performance of the proposed framework is experimentally evaluated on various data sets. On small synthetically generated meshes we analyze the accuracy of the estimated surface parameters, the sensitivity w.r.t. various properties of the input data and w.r.t. model assumptions as well as the computational complexity. Additionally we demonstrate the flexibility w.r.t. different acquisition techniques on real data sets. The proposed method turns out to be accurate, reasonably fast and little sensitive to defects in the data or imprecise model assumptions.

    % NOTE(review): citation key contains ':' -- legal for classic BibTeX, but some
    % downstream tools treat ':' specially; verify before renaming, since other
    % files may cite this key. Entry left byte-identical.
    @PhDThesis{schindler2013:man-made,
    title = {Man-Made Surface Structures from Triangulated Point-Clouds},
    author = {Schindler, Falko},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2013},
    abstract = {Photogrammetry aims at reconstructing shape and dimensions of objects captured with cameras, 3D laser scanners or other spatial acquisition systems. While many acquisition techniques deliver triangulated point clouds with millions of vertices within seconds, the interpretation is usually left to the user. Especially when reconstructing man-made objects, one is interested in the underlying surface structure, which is not inherently present in the data. This includes the geometric shape of the object, e.g. cubical or cylindrical, as well as corresponding surface parameters, e.g. width, height and radius. Applications are manifold and range from industrial production control to architectural on-site measurements to large-scale city models. The goal of this thesis is to automatically derive such surface structures from triangulated 3D point clouds of man-made objects. They are defined as a compound of planar or curved geometric primitives. Model knowledge about typical primitives and relations between adjacent pairs of them should affect the reconstruction positively. After formulating a parametrized model for man-made surface structures, we develop a reconstruction framework with three processing steps: During a fast pre-segmentation exploiting local surface properties we divide the given surface mesh into planar regions. Making use of a model selection scheme based on minimizing the description length, this surface segmentation is free of control parameters and automatically yields an optimal number of segments. A subsequent refinement introduces a set of planar or curved geometric primitives and hierarchically merges adjacent regions based on their joint description length. A global classification and constraint parameter estimation combines the data-driven segmentation with high-level model knowledge. 
Therefore, we represent the surface structure with a graphical model and formulate factors based on likelihood as well as prior knowledge about parameter distributions and class probabilities. We infer the most probable setting of surface and relation classes with belief propagation and estimate an optimal surface parametrization with constraints induced by inter-regional relations. The process is specifically designed to work on noisy data with outliers and a few exceptional freeform regions not describable with geometric primitives. It yields full 3D surface structures with watertightly connected surface primitives of different types. The performance of the proposed framework is experimentally evaluated on various data sets. On small synthetically generated meshes we analyze the accuracy of the estimated surface parameters, the sensitivity w.r.t. various properties of the input data and w.r.t. model assumptions as well as the computational complexity. Additionally we demonstrate the flexibility w.r.t. different acquisition techniques on real data sets. The proposed method turns out to be accurate, reasonably fast and little
    sensitive to defects in the data or imprecise model assumptions.},
    timestamp = {2013.11.26},
    url = {https://hss.ulb.uni-bonn.de/2013/3435/3435.htm},
    }

  • F. Schindler, Ein LaTeX-Kochbuch, 2013.
    [BibTeX] [PDF]

    Dieses Dokument fasst die wichtigsten LaTeX-Befehle und -Konstrukte zusammen, die man für das Verfassen von wissenschaftlichen Arbeiten benötigt. Auf aktuelle und umfangreichere Dokumentationen wird verwiesen. Auf die Installation von LaTeX und einem Editor (Empfehlung: TeX-Maker) sowie den grundsätzlichen Kompiliervorgang3 wird nicht weiter eingegangen. Alle Beispiele sind vollständig aufgeführt und dem Dokument als TEX-Datei angehängt (Aufruf über Büroklammer-Symbol am Seitenrand). Sie sollten sich problemlos übersetzen lassen und liefern das daneben oder darunter abgebildete Ergebnis. Lediglich die Seitenränder wurden aus Platzgründen mehr oder weniger großzügig abgeschnitten.

    @Misc{schindler2013latex,
    title = {Ein LaTeX-Kochbuch},
    author = {Falko Schindler},
    month = mar,
    year = {2013},
    abstract = {Dieses Dokument fasst die wichtigsten LaTeX-Befehle und -Konstrukte zusammen, die man f\"ur das Verfassen von wissenschaftlichen Arbeiten ben\"otigt. Auf aktuelle und umfangreichere Dokumentationen wird verwiesen. Auf die Installation von LaTeX und einem Editor (Empfehlung: TeX-Maker) sowie den grunds\"atzlichen Kompiliervorgang3 wird nicht weiter eingegangen. Alle Beispiele sind vollst\"andig aufgef\"uhrt und dem Dokument als TEX-Datei angeh\"angt (Aufruf \"uber B\"uroklammer-Symbol am Seitenrand). Sie sollten sich problemlos \"ubersetzen lassen und liefern das daneben oder darunter abgebildete Ergebnis. Lediglich die Seitenr\"ander wurden aus Platzgr\"unden mehr oder weniger gro{\ss}z\"ugig abgeschnitten.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schindler2013Latex.pdf},
    }

  • F. Schindler and W. Förstner, “DijkstraFPS: Graph Partitioning in Geometry and Image Processing,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 285–296, 2013. doi:10.1127/1432-8364/2013/0177
    [BibTeX]

    Data partitioning is a common problem in the field of point cloud and image processing applicable to segmentation and clustering. The general principle is to have high similarity of two data points, e.g.pixels or 3D points, within one region and low similarity among regions. This pair-wise similarity between data points can be represented in an attributed graph. In this article we propose a novel graph partitioning algorithm. It integrates a sampling strategy known as farthest point sampling with Dijkstra’s algorithm for deriving a distance transform on a general graph, which does not need to be embedded in some space. According to the pair-wise attributes a Voronoi diagram on the graph is generated yielding the desired segmentation. We demonstrate our approach on various applications such as surface triangulation, surface segmentation, clustering and image segmentation.

    @article{schindler2013dijkstrafps,
      title    = {DijkstraFPS: Graph Partitioning in Geometry and Image Processing},
      author   = {Schindler, Falko and F{\"o}rstner, Wolfgang},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      pages    = {285--296},
      volume   = {4},
      abstract = { Data partitioning is a common problem in the field of point cloud and image processing applicable to segmentation and clustering. The general principle is to have high similarity of two data points, e.g.pixels or 3D points, within one region and low similarity among regions. This pair-wise similarity between data points can be represented in an attributed graph. In this article we propose a novel graph partitioning algorithm. It integrates a sampling strategy known as farthest point sampling with Dijkstra's algorithm for deriving a distance transform on a general graph, which does not need to be embedded in some space. According to the pair-wise attributes a Voronoi diagram on the graph is generated yielding the desired segmentation. We demonstrate our approach on various applications such as surface triangulation, surface segmentation, clustering and image segmentation. },
      doi      = {10.1127/1432-8364/2013/0177},
    }

  • J. Schneider and W. Förstner, “Bundle Adjustment and System Calibration with Points at Infinity for Omnidirectional Camera Systems,” Z. f. Photogrammetrie, Fernerkundung und Geoinformation, vol. 4, p. 309–321, 2013. doi:10.1127/1432-8364/2013/0179
    [BibTeX] [PDF]

    We present a calibration method for multi-view cameras that provides a rigorous maximum likelihood estimation of the mutual orientation of the cameras within a rigid multi-camera system. No calibration targets are needed, just a movement of the multi-camera system taking synchronized images of a highly textured and static scene. Multi-camera systems with non-overlapping views have to be rotated within the scene so that corresponding points are visible in different cameras at different times of exposure. By using an extended version of the projective collinearity equation all estimates can be optimized in one bundle adjustment where we constrain the relative poses of the cameras to be fixed. For stabilizing camera orientations – especially rotations – one should generally use points at the horizon within the bundle adjustment, which classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points which allows us to use images of omnidirectional cameras with single viewpoint like fisheye cameras and scene points at a large distance from the camera or even at infinity. We show results of our calibration method on (1) the omnidirectional multi-camera system Ladybug 3 from Point Grey, (2) a camera-rig with five cameras used for the acquisition of complex 3D structures and (3) a camera-rig mounted on a UAV consisting of four fisheye cameras which provide a large field of view and which is used for visual odometry and obstacle detection in the project MoD (DFG-Project FOR 1505 “Mapping on Demand”).

    @article{schneider13pfg,
      title    = {Bundle Adjustment and System Calibration with Points at Infinity for Omnidirectional Camera Systems},
      author   = {J. Schneider and W. F{\"o}rstner},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      pages    = {309--321},
      volume   = {4},
      abstract = {We present a calibration method for multi-view cameras that provides a rigorous maximum likelihood estimation of the mutual orientation of the cameras within a rigid multi-camera system. No calibration targets are needed, just a movement of the multi-camera system taking synchronized images of a highly textured and static scene. Multi-camera systems with non-overlapping views have to be rotated within the scene so that corresponding points are visible in different cameras at different times of exposure. By using an extended version of the projective collinearity equation all estimates can be optimized in one bundle adjustment where we constrain the relative poses of the cameras to be fixed. For stabilizing camera orientations - especially rotations - one should generally use points at the horizon within the bundle adjustment, which classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points which allows us to use images of omnidirectional cameras with single viewpoint like fisheye cameras and scene points at a large distance from the camera or even at infinity. We show results of our calibration method on (1) the omnidirectional multi-camera system Ladybug 3 from Point Grey, (2) a camera-rig with five cameras used for the acquisition of complex 3D structures and (3) a camera-rig mounted on a UAV consisting of four fisheye cameras which provide a large field of view and which is used for visual odometry and obstacle detection in the project MoD (DFG-Project FOR 1505 ``Mapping on Demand'').},
      doi      = {10.1127/1432-8364/2013/0179},
      url      = {https://www.dgpf.de/pfg/2013/pfg2013_4_schneider.pdf},
    }

  • J. Schneider, T. Läbe, and W. Förstner, “Incremental Real-time Bundle Adjustment for Multi-camera Systems with Points at Infinity,” in ISPRS Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2013, pp. 355-360. doi:10.5194/isprsarchives-XL-1-W2-355-2013
    [BibTeX] [PDF]

    This paper presents a concept and first experiments on a keyframe-based incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. In order to avoid periodic batch steps, we use the software iSAM2 for sparse nonlinear incremental optimization, which is highly efficient through incremental variable reordering and fluid relinearization. We adapted the software to allow for (1) multi-view cameras by taking the rigid transformation between the cameras into account, (2) omni-directional cameras as it can handle arbitrary bundles of rays and (3) scene points at infinity, which improve the estimation of the camera orientation as points at the horizon can be observed over long periods of time. The real-time bundle adjustment refers to sets of keyframes, consisting of frames, one per camera, taken in a synchronized way, that are initiated if a minimal geometric distance to the last keyframe set is exceeded. It uses interest points in the keyframes as observations, which are tracked in the synchronized video streams of the individual cameras and matched across the cameras, if possible. First experiments show the potential of the incremental bundle adjustment \wrt time requirements. Our experiments are based on a multi-camera system with four fisheye cameras, which are mounted on a UAV as two stereo pairs, one looking ahead and one looking backwards, providing a large field of view.

    @inproceedings{schneider13isprs,
      title     = {Incremental Real-time Bundle Adjustment for Multi-camera Systems with Points at Infinity},
      author    = {J. Schneider and T. L{\"a}be and W. F{\"o}rstner},
      booktitle = {ISPRS Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences},
      year      = {2013},
      pages     = {355--360},
      volume    = {XL-1/W2},
      abstract  = {This paper presents a concept and first experiments on a keyframe-based incremental bundle adjustment for real-time structure and motion estimation in an unknown scene. In order to avoid periodic batch steps, we use the software iSAM2 for sparse nonlinear incremental optimization, which is highly efficient through incremental variable reordering and fluid relinearization. We adapted the software to allow for (1) multi-view cameras by taking the rigid transformation between the cameras into account, (2) omni-directional cameras as it can handle arbitrary bundles of rays and (3) scene points at infinity, which improve the estimation of the camera orientation as points at the horizon can be observed over long periods of time. The real-time bundle adjustment refers to sets of keyframes, consisting of frames, one per camera, taken in a synchronized way, that are initiated if a minimal geometric distance to the last keyframe set is exceeded. It uses interest points in the keyframes as observations, which are tracked in the synchronized video streams of the individual cameras and matched across the cameras, if possible. First experiments show the potential of the incremental bundle adjustment w.r.t. time requirements. Our experiments are based on a multi-camera system with four fisheye cameras, which are mounted on a UAV as two stereo pairs, one looking ahead and one looking backwards, providing a large field of view.},
      doi       = {10.5194/isprsarchives-XL-1-W2-355-2013},
      url       = {https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XL-1-W2/355/2013/isprsarchives-XL-1-W2-355-2013.pdf},
    }

  • J. Siegemund, “Street Surfaces and Boundaries from Depth Image Sequences using Probabilistic Models,” PhD Thesis, 2013.
    [BibTeX] [PDF]

    This thesis presents an approach for the detection and reconstruction of street surfaces and boundaries from depth image sequences. Active driver assistance systems which monitor and interpret the environment based on vehicle mounted sensors to support the driver embody a current research focus of the automotive industry. An essential task of these systems is the modeling of the vehicle’s static environment. This comprises the determination of the vertical slope and curvature characteristics of the street surface as well as the robust detection of obstacles and, thus, the free drivable space (alias free-space). In this regard, obstacles of low height, e.g. curbs, are of special interest since they often embody the first geometric delimiter of the free-space. The usage of depth images acquired from stereo camera systems becomes more important in this context due to the high data rate and affordable price of the sensor. However, recent approaches for object detection are often limited to the detection of objects which are distinctive in height, such as cars and guardrails, or explicitly address the detection of particular object classes. These approaches are usually based on extremely restrictive assumptions, such as planar street surfaces, in order to deal with the high measurement noise. The main contribution of this thesis is the development, analysis and evaluation of an approach which detects the free-space in the immediate maneuvering area in front of the vehicle and explicitly models the free-space boundary by means of a spline curve. The approach considers in particular obstacles of low height (higher than 10 cm) without limitation on particular object classes. Furthermore, the approach has the ability to cope with various slope and curvature characteristics of the observed street surface and is able to reconstruct this surface by means of a flexible spline model. 
In order to allow for robust results despite the flexibility of the model and the high measurement noise, the approach employs probabilistic models for the preprocessing of the depth map data as well as for the detection of the drivable free-space. An elevation model is computed from the depth map considering the paths of the optical rays and the uncertainty of the depth measurements. Based on this elevation model, an iterative two step approach is performed which determines the drivable free-space by means of a Markov Random Field and estimates the spline parameters of the free-space boundary curve and the street surface. Outliers in the elevation data are explicitly modeled. The performance of the overall approach and the influence of key components are systematically evaluated within experiments on synthetic and real world test scenarios. The results demonstrate the ability of the approach to accurately model the boundary of the drivable free-space as well as the street surface even in complex scenarios with multiple obstacles or strong curvature of the street surface. The experiments further reveal the limitations of the approach, which are discussed in detail. Zusammenfassung: Schätzung von Straßenoberflächen und -begrenzungen aus Sequenzen von Tiefenkarten unter Verwendung probabilistischer Modelle. Diese Arbeit präsentiert ein Verfahren zur Detektion und Rekonstruktion von Straßenoberflächen und -begrenzungen auf der Basis von Tiefenkarten. Aktive Fahrerassistenzsysteme, welche mit der im Fahrzeug verbauten Sensorik die Umgebung erfassen, interpretieren und den Fahrer unterstützen, sind ein aktueller Forschungsschwerpunkt der Fahrzeugindustrie. Eine wesentliche Aufgabe dieser Systeme ist die Modellierung der statischen Fahrzeugumgebung. Dies beinhaltet die Bestimmung der vertikalen Neigungs- und Krümmungseigenschaften der Fahrbahn, sowie die robuste Detektion von Hindernissen und somit des befahrbaren Freiraumes.
Hindernisse von geringer Höhe, wie z.B. Bordsteine, sind in diesem Zusammenhang von besonderem Interesse, da sie häufig die erste geometrische Begrenzung des Fahrbahnbereiches darstellen. In diesem Kontext gewinnt die Verwendung von Tiefenkarten aus Stereo-Kamera-Systemen wegen der hohen Datenrate und relativ geringen Kosten des Sensors zunehmend an Bedeutung. Aufgrund des starken Messrauschens beschränken sich herkömmliche Verfahren zur Hinderniserkennung jedoch meist auf erhabene Objekte wie Fahrzeuge oder Leitplanken, oder aber adressieren einzelne Objektklassen wie Bordsteine explizit. Dazu werden häufig extrem restriktive Annahmen verwendet wie z.B. planare Straßenoberflächen. Der Hauptbeitrag dieser Arbeit besteht in der Entwicklung, Analyse und Evaluation eines Verfahrens, welches den befahrbaren Freiraum im Nahbereich des Fahrzeugs detektiert und dessen Begrenzung mit Hilfe einer Spline-Kurve explizit modelliert. Das Verfahren berücksichtigt insbesondere Hindernisse geringer Höhe (größer als 10 cm) ohne Beschränkung auf bestimmte Objektklassen. Weiterhin ist das Verfahren in der Lage, mit verschiedenartigen Neigungs- und Krümmungseigenschaften der vor dem Fahrzeug liegenden Fahrbahnoberfläche umzugehen und diese durch Verwendung eines flexiblen Spline-Modells zu rekonstruieren. Um trotz der hohen Flexibilität des Modells und des hohen Messrauschens robuste Ergebnisse zu erzielen, verwendet das Verfahren probabilistische Modelle zur Vorverarbeitung der Eingabedaten und zur Detektion des befahrbaren Freiraumes. Aus den Tiefenkarten wird unter Berücksichtigung der Strahlengänge und Unsicherheiten der Tiefenmessungen ein Höhenmodell berechnet. In einem iterativen Zwei-Schritt-Verfahren werden anhand dieses Höhenmodells der befahrbare Freiraum mit Hilfe eines Markov-Zufallsfeldes bestimmt sowie die Parameter der begrenzenden Spline-Kurve und Straßenoberfläche geschätzt.
Ausreißer in den Höhendaten werden dabei explizit modelliert. Die Leistungsfähigkeit des Gesamtverfahrens sowie der Einfluss zentraler Komponenten wird im Rahmen von Experimenten auf synthetischen und realen Testszenen systematisch analysiert. Die Ergebnisse demonstrieren die Fähigkeit des Verfahrens, die Begrenzung des befahrbaren Freiraumes sowie die Fahrbahnoberfläche selbst in komplexen Szenarien mit multiplen Hindernissen oder starker Fahrbahnkrümmung akkurat zu modellieren. Weiterhin werden die Grenzen des Verfahrens aufgezeigt und detailliert untersucht.

    @phdthesis{siegemund2013,
      title    = {Street Surfaces and Boundaries from Depth Image Sequences using Probabilistic Models},
      author   = {Siegemund, Jan},
      school   = {Department of Photogrammetry, University of Bonn},
      year     = {2013},
      abstract = {This thesis presents an approach for the detection and reconstruction of street surfaces and boundaries from depth image sequences. Active driver assistance systems which monitor and interpret the environment based on vehicle mounted sensors to support the driver embody a current research focus of the automotive industry. An essential task of these systems is the modeling of the vehicle's static environment. This comprises the determination of the vertical slope and curvature characteristics of the street surface as well as the robust detection of obstacles and, thus, the free drivable space (alias free-space). In this regard, obstacles of low height, e.g. curbs, are of special interest since they often embody the first geometric delimiter of the free-space. The usage of depth images acquired from stereo camera systems becomes more important in this context due to the high data rate and affordable price of the sensor. However, recent approaches for object detection are often limited to the detection of objects which are distinctive in height, such as cars and guardrails, or explicitly address the detection of particular object classes. These approaches are usually based on extremely restrictive assumptions, such as planar street surfaces, in order to deal with the high measurement noise. The main contribution of this thesis is the development, analysis and evaluation of an approach which detects the free-space in the immediate maneuvering area in front of the vehicle and explicitly models the free-space boundary by means of a spline curve. The approach considers in particular obstacles of low height (higher than 10 cm) without limitation on particular object classes. Furthermore, the approach has the ability to cope with various slope and curvature characteristics of the observed street surface and is able to reconstruct this surface by means of a flexible spline model. In order to allow for robust results despite the flexibility of the model and the high measurement noise, the approach employs probabilistic models for the preprocessing of the depth map data as well as for the detection of the drivable free-space. An elevation model is computed from the depth map considering the paths of the optical rays and the uncertainty of the depth measurements. Based on this elevation model, an iterative two step approach is performed which determines the drivable free-space by means of a Markov Random Field and estimates the spline parameters of the free-space boundary curve and the street surface. Outliers in the elevation data are explicitly modeled. The performance of the overall approach and the influence of key components are systematically evaluated within experiments on synthetic and real world test scenarios. The results demonstrate the ability of the approach to accurately model the boundary of the drivable free-space as well as the street surface even in complex scenarios with multiple obstacles or strong curvature of the street surface. The experiments further reveal the limitations of the approach, which are discussed in detail. Zusammenfassung Sch{\"a}tzung von Stra{\ss}enoberfl{\"a}chen und -begrenzungen aus Sequenzen von Tiefenkarten unter Verwendung probabilistischer Modelle Diese Arbeit pr{\"a}sentiert ein Verfahren zur Detektion und Rekonstruktion von Stra{\ss}enoberfl{\"a}chen und -begrenzungen auf der Basis von Tiefenkarten. Aktive Fahrerassistenzsysteme, welche mit der im Fahrzeug verbauten Sensorik die Umgebung erfassen, interpretieren und den Fahrer unterst{\"u}tzen, sind ein aktueller Forschungsschwerpunkt der Fahrzeugindustrie. Eine wesentliche Aufgabe dieser Systeme ist die Modellierung der statischen Fahrzeugumgebung. Dies beinhaltet die Bestimmung der vertikalen Neigungs- und Kr{\"u}mmungseigenschaften der Fahrbahn, sowie die robuste Detektion von Hindernissen und somit des befahrbaren Freiraumes. Hindernisse von geringer H{\"o}he, wie z.B. Bordsteine, sind in diesem Zusammenhang von besonderem Interesse, da sie h{\"a}ufig die erste geometrische Begrenzung des Fahrbahnbereiches darstellen. In diesem Kontext gewinnt die Verwendung von Tiefenkarten aus Stereo-Kamera-Systemen wegen der hohen Datenrate und relativ geringen Kosten des Sensors zunehmend an Bedeutung. Aufgrund des starken Messrauschens beschr{\"a}nken sich herk{\"o}mmliche Verfahren zur Hinderniserkennung jedoch meist auf erhabene Objekte wie Fahrzeuge oder Leitplanken, oder aber adressieren einzelne Objektklassen wie Bordsteine explizit. Dazu werden h{\"a}ufig extrem restriktive Annahmen verwendet wie z.B. planare Stra{\ss}enoberfl{\"a}chen. Der Hauptbeitrag dieser Arbeit besteht in der Entwicklung, Analyse und Evaluation eines Verfahrens, welches den befahrbaren Freiraum im Nahbereich des Fahrzeugs detektiert und dessen Begrenzung mit Hilfe einer Spline-Kurve explizit modelliert. Das Verfahren ber{\"u}cksichtigt insbesondere Hindernisse geringer H{\"o}he (gr{\"o}{\ss}er als 10 cm) ohne Beschr{\"a}nkung auf bestimmte Objektklassen. Weiterhin ist das Verfahren in der Lage, mit verschiedenartigen Neigungs- und Kr{\"u}mmungseigenschaften der vor dem Fahrzeug liegenden Fahrbahnoberfl{\"a}che umzugehen und diese durch Verwendung eines flexiblen Spline-Modells zu rekonstruieren. Um trotz der hohen Flexibilit{\"a}t des Modells und des hohen Messrauschens robuste Ergebnisse zu erzielen, verwendet das Verfahren probabilistische Modelle zur Vorverarbeitung der Eingabedaten und zur Detektion des befahrbaren Freiraumes. Aus den Tiefenkarten wird unter Ber{\"u}cksichtigung der Strahleng{\"a}nge und Unsicherheiten der Tiefenmessungen ein H{\"o}henmodell berechnet. In einem iterativen Zwei-Schritt-Verfahren werden anhand dieses H{\"o}henmodells der befahrbare Freiraum mit Hilfe eines Markov-Zufallsfeldes bestimmt sowie die Parameter der begrenzenden Spline-Kurve und Stra{\ss}enoberfl{\"a}che gesch{\"a}tzt. Ausrei{\ss}er in den H{\"o}hendaten werden dabei explizit modelliert. Die Leistungsf{\"a}higkeit des Gesamtverfahrens sowie der Einfluss zentraler Komponenten, wird im Rahmen von Experimenten auf synthetischen und realen Testszenen systematisch analysiert. Die Ergebnisse demonstrieren die F{\"a}higkeit des Verfahrens, die Begrenzung des befahrbaren Freiraumes sowie die Fahrbahnoberfl{\"a}che selbst in komplexen Szenarien mit multiplen Hindernissen oder starker Fahrbahnkr{\"u}mmung akkurat zu modellieren. Weiterhin werden die Grenzen des Verfahrens aufgezeigt und detailliert untersucht.},
      timestamp = {2013.10.07},
      url      = {https://hss.ulb.uni-bonn.de/2013/3436/3436.htm},
    }

  • J. Stefanski, B. Mack, and B. Waske, “Optimization of object-based image analysis with Random Forests for land cover mapping,” IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 6, iss. 6, p. 2492–2504, 2013. doi:10.1109/JSTARS.2013.2253089
    [BibTeX]

    A prerequisite for object-based image analysis is the generation of adequate segments. However, the parameters for the image segmentation algorithms are often manually defined. Therefore, the generation of an ideal segmentation level is usually costly and user-depended. In this paper a strategy for a semi-automatic optimization of object-based classification of multitemporal data is introduced by using Random Forests (RF) and a novel segmentation algorithm. The Superpixel Contour (SPc) algorithm is used to generate a set of different levels of segmentation, using various combinations of parameters in a user-defined range. Finally, the best parameter combination is selected based on the cross-validation-like out-of-bag (OOB) error that is provided by RF. Therefore, the quality of the parameters and the corresponding segmentation level can be assessed in terms of the classification accuracy, without providing additional independent test data. To evaluate the potential of the proposed concept, we focus on land cover classification of two study areas, using multitemporal RapidEye and SPOT 5 images. A classification that is based on eCognition’s widely used Multiresolution Segmentation algorithm (MRS) is used for comparison. Experimental results underline that the two segmentation algorithms SPc and MRS perform similar in terms of accuracy and visual interpretation. The proposed strategy that uses the OOB error for the selection of the ideal segmentation level provides similar classification accuracies, when compared to the results achieved by manual-based image segmentation. Overall, the proposed strategy is operational and easy to handle and thus economizes the findings of optimal segmentation parameters for the Superpixel Contour algorithm.

    @article{stefanski2013optimization,
      title    = {Optimization of object-based image analysis with Random Forests for land cover mapping},
      author   = {Stefanski, Jan and Mack, Benjamin and Waske, Bj{\"o}rn},
      journal  = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
      year     = {2013},
      number   = {6},
      pages    = {2492--2504},
      volume   = {6},
      abstract = {A prerequisite for object-based image analysis is the generation of adequate segments. However, the parameters for the image segmentation algorithms are often manually defined. Therefore, the generation of an ideal segmentation level is usually costly and user-depended. In this paper a strategy for a semi-automatic optimization of object-based classification of multitemporal data is introduced by using Random Forests (RF) and a novel segmentation algorithm. The Superpixel Contour (SPc) algorithm is used to generate a set of different levels of segmentation, using various combinations of parameters in a user-defined range. Finally, the best parameter combination is selected based on the cross-validation-like out-of-bag (OOB) error that is provided by RF. Therefore, the quality of the parameters and the corresponding segmentation level can be assessed in terms of the classification accuracy, without providing additional independent test data. To evaluate the potential of the proposed concept, we focus on land cover classification of two study areas, using multitemporal RapidEye and SPOT 5 images. A classification that is based on eCognition's widely used Multiresolution Segmentation algorithm (MRS) is used for comparison. Experimental results underline that the two segmentation algorithms SPc and MRS perform similar in terms of accuracy and visual interpretation. The proposed strategy that uses the OOB error for the selection of the ideal segmentation level provides similar classification accuracies, when compared to the results achieved by manual-based image segmentation. Overall, the proposed strategy is operational and easy to handle and thus economizes the findings of optimal segmentation parameters for the Superpixel Contour algorithm.},
      doi      = {10.1109/JSTARS.2013.2253089},
      issn     = {1939-1404},
      owner    = {JanS},
      timestamp = {2013.03.14},
    }

  • S. Wenzel and W. Förstner, “Finding Poly-Curves of Straight Line and Ellipse Segments in Images,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 4, p. 297–308, 2013. doi:10.1127/1432-8364/2013/0178
    [BibTeX]

    Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker’s polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).

    @article{wenzel2013finding,
      title    = {Finding Poly-Curves of Straight Line and Ellipse Segments in Images},
      author   = {Wenzel, Susanne and F{\"o}rstner, Wolfgang},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      year     = {2013},
      pages    = {297--308},
      volume   = {4},
      abstract = {Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker's polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).},
      doi      = {10.1127/1432-8364/2013/0178},
      file     = {Technical Report:Wenzel2013Finding.pdf},
    }

  • S. Wenzel and W. Förstner, “Finding Poly-Curves of Straight Line and Ellipse Segments in Images,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2013-02, 2013.
    [BibTeX] [PDF]

    Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker’s polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).

    @techreport{wenzel2013findingtr,
      title       = {Finding Poly-Curves of Straight Line and Ellipse Segments in Images},
      author      = {Wenzel, Susanne and F{\"o}rstner, Wolfgang},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2013},
      month       = jul,
      number      = {TR-IGG-P-2013-02},
      abstract    = {Simplification of given polygons has attracted many researchers. Especially, finding circular and elliptical structures in images is relevant in many applications. Given pixel chains from edge detection, this paper proposes a method to segment them into straight line and ellipse segments. We propose an adaption of Douglas-Peucker's polygon simplification algorithm using circle segments instead of straight line segments and partition the sequence of points instead the sequence of edges. It is robust and decreases the complexity of given polygons better than the original algorithm. In a second step, we further simplify the poly-curve by merging neighbouring segments to straight line and ellipse segments. Merging is based on the evaluation of variation of entropy for proposed geometric models, which turns out as a combination of hypothesis testing and model selection. We demonstrate the results of {\tt circlePeucker} as well as merging on several images of scenes with significant circular structures and compare them with the method of {\sc Patraucean} et al. (2012).},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2013Finding.pdf},
    }

  • K. M. Wurm, C. Dornhege, B. Nebel, W. Burgard, and C. Stachniss, “Coordinating Heterogeneous Teams of Robots using Temporal Symbolic Planning,” Autonomous Robots, vol. 34, 2013.
    [BibTeX] [PDF]
    [none]
    @Article{wurm2013,
    title = {Coordinating Heterogeneous Teams of Robots using Temporal Symbolic Planning},
    author = {K.M. Wurm and C. Dornhege and B. Nebel and W. Burgard and C. Stachniss},
    journal = auro,
    year = {2013},
    volume = {34},
    number = {4},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm13auro.pdf},
    }

  • K. M. Wurm, H. Kretzschmar, R. Kümmerle, C. Stachniss, and W. Burgard, “Identifying Vegetation from Laser Data in Structured Outdoor Environments,” Journal on Robotics and Autonomous Systems (RAS), 2013.
    [BibTeX] [PDF]
    [none]
    @article{wurm2013a,
      author    = {K.M. Wurm and H. Kretzschmar and R. K{\"u}mmerle and C. Stachniss and W. Burgard},
      title     = {Identifying Vegetation from Laser Data in Structured Outdoor Environments},
      journal   = jras,
      year      = {2013},
      note      = {In press},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm13ras.pdf},
    }

2012

  • N. Abdo, H. Kretzschmar, and C. Stachniss, “From Low-Level Trajectory Demonstrations to Symbolic Actions for Planning,” in Proc. of the ICAPS Workshop on Combining Task and Motion Planning for Real-World Applications (TAMPRA), 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{abdo2012,
      author    = {N. Abdo and H. Kretzschmar and C. Stachniss},
      title     = {From Low-Level Trajectory Demonstrations to Symbolic Actions for Planning},
      booktitle = {Proc. of the ICAPS Workshop on Combining Task and Motion Planning for Real-World Applications (TAMPRA)},
      year      = {2012},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/abdo12tampra.pdf},
    }

  • P. A. Becker, “3D Rekonstruktion symmetrischer Objekte aus Tiefenbildern,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2012.
    [BibTeX]

    none

    @MastersThesis{becker2012rekonstruktion,
    title = {3D Rekonstruktion symmetrischer Objekte aus Tiefenbildern},
    author = {Becker, Philip Alexander},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2012},
    type = {Bachelor Thesis},
    abstract = {[none]},
    timestamp = {2013.04.16},
    }

  • D. Chai, W. Förstner, and M. Ying Yang, “Combine Markov Random Fields and Marked Point Processes to extract Building from Remotely Sensed Images,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2012. doi:10.5194/isprsannals-I-3-365-2012
    [BibTeX] [PDF]

    Automatic building extraction from remotely sensed images is a research topic much more significant than ever. One of the key issues is object and image representation. Markov random fields usually referring to the pixel level can not represent high-level knowledge well. On the contrary, marked point processes can not represent low-level information well even though they are a powerful model at object level. We propose to combine Markov random fields and marked point processes to represent both low-level information and high-level knowledge, and present a combined framework of modelling and estimation for building extraction from single remotely sensed image. At high level, rectangles are used to represent buildings, and a marked point process is constructed to represent the buildings on ground scene. Interactions between buildings are introduced into the the model to represent their relationships. At the low level, a MRF is used to represent the statistics of the image appearance. Histograms of colours are adopted to represent the building’s appearance. The high-level model and the low-level model are combined by establishing correspondences between marked points and nodes of the MRF. We adopt reversible jump Markov Chain Monte Carlo (RJMCMC) techniques to explore the configuration space at the high level, and adopt a Graph Cut algorithm to optimize configuration at the low level. We propose a top-down schema to use results from high level to guide the optimization at low level, and propose a bottom-up schema to use results from low level to drive the sampling at high level. Experimental results demonstrate that better results can be achieved by adopting such hybrid representation.

    @inproceedings{chai*12:combine,
      author    = {Chai, D. and F\"orstner, W. and Ying Yang, M.},
      title     = {Combine Markov Random Fields and Marked Point Processes to extract Building from Remotely Sensed Images},
      booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
      year      = {2012},
      doi       = {10.5194/isprsannals-I-3-365-2012},
      abstract  = {Automatic building extraction from remotely sensed images is a research topic much more significant than ever. One of the key issues is object and image representation. Markov random fields usually referring to the pixel level can not represent high-level knowledge well. On the contrary, marked point processes can not represent low-level information well even though they are a powerful model at object level. We propose to combine Markov random fields and marked point processes to represent both low-level information and high-level knowledge, and present a combined framework of modelling and estimation for building extraction from single remotely sensed image. At high level, rectangles are used to represent buildings, and a marked point process is constructed to represent the buildings on ground scene. Interactions between buildings are introduced into the the model to represent their relationships. At the low level, a MRF is used to represent the statistics of the image appearance. Histograms of colours are adopted to represent the building's appearance. The high-level model and the low-level model are combined by establishing correspondences between marked points and nodes of the MRF. We adopt reversible jump Markov Chain Monte Carlo (RJMCMC) techniques to explore the configuration space at the high level, and adopt a Graph Cut algorithm to optimize configuration at the low level. We propose a top-down schema to use results from high level to guide the optimization at low level, and propose a bottom-up schema to use results from low level to drive the sampling at high level. Experimental results demonstrate that better results can be achieved by adopting such hybrid representation.},
      timestamp = {2015.07.09},
      url       = {https://www.ipb.uni-bonn.de/pdfs/isprsannals-I-3-365-2012.pdf},
    }

  • W. Förstner, “Minimal Representations for Testing and Estimation in Projective Spaces,” Z. f. Photogrammetrie, Fernerkundung und Geoinformation, vol. 3, p. 209–220, 2012. doi:10.1127/1432-8364/2012/0112
    [BibTeX]

    Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.

    @article{forstner2012minimal,
      author    = {F\"orstner, Wolfgang},
      title     = {Minimal Representations for Testing and Estimation in Projective Spaces},
      journal   = {Z. f. Photogrammetrie, Fernerkundung und Geoinformation},
      year      = {2012},
      volume    = {3},
      pages     = {209--220},
      doi       = {10.1127/1432-8364/2012/0112},
      abstract  = {Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.},
      file      = {Technical Report:Forstner2012Minimal.pdf},
      timestamp = {2013.01.09},
    }

  • W. Förstner, “Minimal Representations for Testing and Estimation in Projective Spaces,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2012-03, 2012.
    [BibTeX] [PDF]

    Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.

    @techreport{forstner2012minimalreport,
      author      = {F\"orstner, Wolfgang},
      title       = {Minimal Representations for Testing and Estimation in Projective Spaces},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2012},
      number      = {TR-IGG-P-2012-03},
      abstract    = {Testing and estimation using homogeneous coordinates and matrices has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations. The paper proposes a representation of the uncertainty of all types of geometric entities which (1) only requires the minimum number of parameters, (2) is free of singularities, (3) enables to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (4) allows to handle geometric entities which are at infinity or at least very far away. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate its applicability for determining 3D lines from observed image line segments in a multi view setup.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Forstner2012Minimal.pdf},
    }

  • S. Gehrig, A. Barth, N. Schneider, and J. Siegemund, “A Multi-Cue Approach for Stereo-Based Object Confidence Estimation,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Vilamoura, Portugal, 2012, p. 3055 – 3060. doi:10.1109/IROS.2012.6385455
    [BibTeX]

    In this contribution we present an approach to compute object confidences for stereo-vision-based object tracking schemes. Meaningful object confidences help to reduce false alarm rates of safety systems and improve the downstream system performance for modules such as sensor fusion and situation analysis. Several cues from stereo vision and from the tracking process are fused in a Bayesian manner. An evaluation on a 38,000 frames urban drive shows the effectiveness of the approach compared to the same object tracking scheme with simple heuristics for the object confidence. Within the evaluation, also the relevance of occurring phantoms is considered by computing the collision risk. The proposed confidence measures reduce the number of predicted imminent collisions from 86 to 0 maintaining almost the same system availability.

    @InProceedings{gehrig2012multi,
    title = {A Multi-Cue Approach for Stereo-Based Object Confidence Estimation},
    author = {Gehrig, Stefan and Barth, Alexander and Schneider, Nicolai and Siegemund, Jan},
    booktitle = iros,
    year = {2012},
    address = {Vilamoura, Portugal},
    pages = {3055--3060},
    abstract = {In this contribution we present an approach to compute object confidences for stereo-vision-based object tracking schemes. Meaningful object confidences help to reduce false alarm rates of safety systems and improve the downstream system performance for modules such as sensor fusion and situation analysis. Several cues from stereo vision and from the tracking process are fused in a Bayesian manner. An evaluation on a 38,000 frames urban drive shows the effectiveness of the approach compared to the same object tracking scheme with simple heuristics for the object confidence. Within the evaluation, also the relevance of occurring phantoms is considered by computing the collision risk. The proposed confidence measures reduce the number of predicted imminent collisions from 86 to 0 maintaining almost the same system availability.},
    doi = {10.1109/IROS.2012.6385455},
    }

  • G. Grisetti, L. Iocchi, B. Leibe, V. A. Ziparo, and C. Stachniss, “Digitization of Inaccessible Archeological Sites with Autonomous Mobile Robots,” in Conf. on Robotics Innovation for Cultural Heritage, 2012.
    [BibTeX]
    [none]
    @InProceedings{grisetti2012,
    title = {Digitization of Inaccessible Archeological Sites with Autonomous Mobile Robots},
    author = {G. Grisetti and L. Iocchi and B. Leibe and V.A. Ziparo and C. Stachniss},
    booktitle = {Conf. on Robotics Innovation for Cultural Heritage},
    year = {2012},
    abstract = {[none]},
    note = {Extended abstract},
    timestamp = {2014.04.24},
    }

  • M. Hans, “Die Verbesserung einer Bildsegmentierung unter Verwendung von 3D Merkmalen,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2012.
    [BibTeX] [PDF]

    Ziel einer partionellen Bildsegmentierung ist die Einteilung eines Bildes in Regionen. Dabei wird jedes Pixel zu je einer Region zugeordnet. Liegen ungünstige Beleuchtungsverhältnisse im Bild vor, ist eine Segmentierung einzig basierend auf Bilddaten nicht ausreichend, da aneinandergrenzende Objekteile mit ähnlichen Farbwerten nicht unterschieden werden können. Mit Hilfe von 3D-Merkmalen können wir solche Bildsegmentierungen verbessern. Dabei liegt der Fokus der Arbeit auf segmentierten Luftbildern mit Dachflächen. Mit der Annahme, dass sich die Dächer aus Flächen erster Ordnung zusammensetzen, werden in den vorsegmentierten Bildregionen zunächst zwei Ebenen in den zugeordneten Punkten einer 3D-Punktwolke geschätzt. Hierzu wird der random sample consensus (RANSAC, Fischler and Bolles (1981)) verwendet. Wir beschränken uns auf die Trennkante zweier Dachflächen, die in einem bekannten Winkel $\varphi$ zueinander stehen und die gleiche Neigung haben. Die Berechnung der Ebenenparameter ist somit bereits mit vier geeigneten Punkten der Objektkoordinaten möglich. Mit den geschätzten Ebenen in der Punktwolke segmentierte Bildregion kann diese aufgesplittet werden. Hierzu wenden wir ein lineares diskriminatives Modell an, um eine lineare Kante als Trennung in der Bildsegmentierung einzeichnen zu können. Eine visuelle Evaluierung der Ergebnisse zeigt, dass die hier vorgestellten Verfahren eine Trennung der Dachregionen an einer sinnvollen Stelle ermöglichen. Dabei werden die Verfahren an Bildern mit unterschiedlichen Dachformen getestet. Die Leistungsfähigkeit der Verfahren hängt vor Allem von der Punktkonfiguration der von RANSAC ausgewählten Punkte ab. Diese Arbeit beschreibt uns somit Verfahren, die eine regionenbasierende Segmentierung von Dachflächen auf Luftbildern unter der Verwendung von 3D Merkmalen verbessern.

    @MastersThesis{hans2010die,
    title = {Die Verbesserung einer Bildsegmentierung unter Verwendung von 3D Merkmalen},
    author = {Hans, Mathias},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2012},
    note = {Betreuung: Prof. Dr.-Ing Wolfgang F\"orstner, Dipl.-Ing. Ribana Roscher},
    type = {Bachelor Thesis},
    abstract = {Ziel einer partionellen Bildsegmentierung ist die Einteilung eines Bildes in Regionen. Dabei wird jedes Pixel zu je einer Region zugeordnet. Liegen ung\"unstige Beleuchtungsverh\"altnisse im Bild vor, ist eine Segmentierung einzig basierend auf Bilddaten nicht ausreichend, da aneinandergrenzende Objekteile mit \"ahnlichen Farbwerten nicht unterschieden werden k\"onnen. Mit Hilfe von 3D-Merkmalen k\"onnen wir solche Bildsegmentierungen verbessern. Dabei liegt der Fokus der Arbeit auf segmentierten Luftbildern mit Dachfl\"achen. Mit der Annahme, dass sich die D\"acher aus Fl\"achen erster Ordnung zusammensetzen, werden in den vorsegmentierten Bildregionen zun\"achst zwei Ebenen in den zugeordneten Punkten einer 3D-Punktwolke gesch\"atzt. Hierzu wird der random sample consensus (RANSAC, Fischler and Bolles (1981)) verwendet. Wir beschr\"anken uns auf die Trennkante zweier Dachfl\"achen, die in einem bekannten Winkel $\varphi$ zueinander stehen und die gleiche Neigung haben. Die Berechnung der Ebenenparameter ist somit bereits mit vier geeigneten Punkten der Objektkoordinaten m\"oglich. Mit den gesch\"atzten Ebenen in der Punktwolke segmentierte Bildregion kann diese aufgesplittet werden. Hierzu wenden wir ein lineares diskriminatives Modell an, um eine lineare Kante als Trennung in der Bildsegmentierung einzeichnen zu k\"onnen. Eine visuelle Evaluierung der Ergebnisse zeigt, dass die hier vorgestellten Verfahren eine Trennung der Dachregionen an einer sinnvollen Stelle erm\"oglichen. Dabei werden die Verfahren an Bildern mit unterschiedlichen Dachformen getestet. Die Leistungsf\"ahigkeit der Verfahren h\"angt vor Allem von der Punktkonfiguration der von RANSAC ausgew\"ahlten Punkte ab. Diese Arbeit beschreibt uns somit Verfahren, die eine regionenbasierende Segmentierung von Dachfl\"achen auf Luftbildern unter der Verwendung von 3D Merkmalen verbessern.},
    address = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Hans2010Die.pdf},
    }

  • D. Joho, G. D. Tipaldi, N. Engelhard, C. Stachniss, and W. Burgard, “Nonparametric Bayesian Models for Unsupervised Scene Analysis and Reconstruction,” in Proc. of Robotics: Science and Systems (RSS), 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{joho2012,
      author    = {D. Joho and G.D. Tipaldi and N. Engelhard and C. Stachniss and W. Burgard},
      title     = {Nonparametric {B}ayesian Models for Unsupervised Scene Analysis and Reconstruction},
      booktitle = rss,
      year      = {2012},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/joho12rss.pdf},
    }

  • S. Klemenjak, B. Waske, S. Valero, and J. Chanussot, “Automatic Detection of Rivers in High-Resolution SAR Data,” IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 5, iss. 5, p. 1364–1372, 2012. doi:10.1109/JSTARS.2012.2189099
    [BibTeX]

    Remote sensing plays a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties. In this paper, we present an approach for supporting monitoring compliance of river networks in context of the European Water Framework Directive. Only a few approaches have been developed for extracting river networks from satellite data and usually they require manual input, which seems not feasible for automatic and operational application. We propose a method for the automatic extraction of river structures in TerraSAR-X data. The method is based on mathematical morphology and supervised image classification, using automatically selected training samples. The method is applied on TerraSAR-X images from two different study sites. In addition, the results are compared to an alternative method, which requires manual user interaction. The detailed accuracy assessment shows that the proposed method achieves accurate results (Kappa $ {sim}$ 0.7) and performs almost similar in terms of accuracy, when compared to the alternative approach. Moreover, the proposed method can be applied on various datasets (e.g., multitemporal, multisensoral and multipolarized) and does not require any additional user input. Thus, the highly flexible approach is interesting in terms of operational monitoring systems and large scale applications.

    @article{klemenjak2012automatic,
      author    = {Klemenjak, Sascha and Waske, Bj\"orn and Valero, Sivia and Chanussot, Jocelyn},
      title     = {Automatic Detection of Rivers in High-Resolution SAR Data},
      journal   = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
      year      = {2012},
      month     = oct,
      volume    = {5},
      number    = {5},
      pages     = {1364--1372},
      issn      = {1939-1404},
      doi       = {10.1109/JSTARS.2012.2189099},
      abstract  = {Remote sensing plays a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties. In this paper, we present an approach for supporting monitoring compliance of river networks in context of the European Water Framework Directive. Only a few approaches have been developed for extracting river networks from satellite data and usually they require manual input, which seems not feasible for automatic and operational application. We propose a method for the automatic extraction of river structures in TerraSAR-X data. The method is based on mathematical morphology and supervised image classification, using automatically selected training samples. The method is applied on TerraSAR-X images from two different study sites. In addition, the results are compared to an alternative method, which requires manual user interaction. The detailed accuracy assessment shows that the proposed method achieves accurate results (Kappa $ {sim}$ 0.7) and performs almost similar in terms of accuracy, when compared to the alternative approach. Moreover, the proposed method can be applied on various datasets (e.g., multitemporal, multisensoral and multipolarized) and does not require any additional user input. Thus, the highly flexible approach is interesting in terms of operational monitoring systems and large scale applications.},
      owner     = {waske},
      timestamp = {2012.09.06},
    }

  • F. Korč, “Tractable Learning for a Class of Global Discriminative Models for Context Sensitive Image Interpretation,” PhD Thesis, 2012.
    [BibTeX] [PDF]
    [none]
    @phdthesis{korvc2012tractable,
      author   = {Kor{\vc}, Filip},
      title    = {Tractable Learning for a Class of Global Discriminative Models for Context Sensitive Image Interpretation},
      school   = {Department of Photogrammetry, University of Bonn},
      year     = {2012},
      abstract = {[none]},
      url      = {https://hss.ulb.uni-bonn.de/2012/3010/3010.htm},
    }

  • H. Kretzschmar and C. Stachniss, “Information-Theoretic Pose Graph Compression for Laser-based SLAM,” Intl. Journal of Robotics Research (IJRR), vol. 31, p. 1219–1230, 2012.
    [BibTeX] [PDF]
    [none]
    @Article{kretzschmar2012,
    title = {Information-Theoretic Pose Graph Compression for Laser-based {SLAM}},
    author = {H. Kretzschmar and C. Stachniss},
    journal = ijrr,
    year = {2012},
    volume = {31},
    number = {11},
    pages = {1219--1230},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kretzschmar12ijrr.pdf},
    }

  • J. Roewekaemper, C. Sprunk, G. D. Tipaldi, C. Stachniss, P. Pfaff, and W. Burgard, “On the Position Accuracy of Mobile Robot Localization based on Particle Filters combined with Scan Matching,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{roewekaemper2012,
      author    = {J. Roewekaemper and C. Sprunk and G.D. Tipaldi and C. Stachniss and P. Pfaff and W. Burgard},
      title     = {On the Position Accuracy of Mobile Robot Localization based on Particle Filters combined with Scan Matching},
      booktitle = iros,
      year      = {2012},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://ais.informatik.uni-freiburg.de/publications/papers/roewekaemper12iros.pdf},
    }

  • R. Roscher, “Sequential Learning using Incremental Import Vector Machines for Semantic Segmentation,” PhD Thesis, 2012.
    [BibTeX] [PDF]

    We propose an innovative machine learning algorithm called incremental import vector machines that is used for classification purposes. The classifier is specifically designed for the task of sequential learning, in which the data samples are successively presented to the classifier. The motivation for our work comes from the effort to formulate a classifier that can manage the major challenges of sequential learning problems, while being a powerful classifier in terms of classification accuracy, efficiency and meaningful output. One challenge of sequential learning is that data samples are not completely available to the learner at a given point of time and generally, waiting for a representative number of data is undesirable and impractical. Thus, in order to allow for a classification of given data samples at any time, the learning phase of the classifier model needs to start immediately, even if not all training samples are available. Another challenge is that the number of sequential arriving data samples can be very large or even infinite and thus, not all samples can be stored. Furthermore, the distribution of the sample can vary over time and the classifier model needs to remain stable and unchanged to irrelevant samples while being plastic to new, important samples. Therefore our key contribution is to develop, analyze and evaluate a powerful incremental learner for sequential learning which we call incremental import vector machines (I2VMs). The classifier is based on the batch machine learning algorithm import vector machines, which was developed by Zhu and Hastie (2005). I2VM is a kernel-based, discriminative classifier and thus, is able to deal with complex data distributions. Additionally ,the learner is sparse for an efficient training and testing and has a probabilistic output. A key achievement of this thesis is the verification and analysis of the discriminative and reconstructive model components of IVM and I2VM. 
While discriminative classifiers try to separate the classes as well as possible, classifiers with a reconstructive component aspire to have a high information content in order to approximate the distribution of the data samples. Both properties are necessary for a powerful incremental classifier. A further key achievement is the formulation of the incremental learning strategy of I2VM. The strategy deals with adding and removing data samples and the update of the current set of model parameters. Furthermore, also new classes and features can be incorporated. The learning strategy adapts the model continuously, while keeping it stable and efficient. In our experiments we use I2VM for the semantic segmentation of images from an image database, for large area land cover classification of overlapping remote sensing images and for object tracking in image sequences. We show that I2VM results in superior or competitive classification accuracies to comparable classifiers. A substantial achievement of the thesis is that I2VM’s performance is independent of the ordering of the data samples and a reconsidering of already encountered samples for learning is not necessary. A further achievement is that I2VM is able to deal with very long data streams without a loss in the efficiency. Furthermore, as another achievement, we show that I2VM provide reliable posterior probabilities since samples with high class probabilities are accurately classified, whereas relatively low class probabilities are more likely referred to misclassified samples.

    @PhDThesis{roscher2012sequential,
    title = {Sequential Learning using Incremental Import Vector Machines for Semantic Segmentation},
    author = {Roscher, Ribana},
    school = {Department of Photogrammetry, University of Bonn},
    year = {2012},
    abstract = {We propose an innovative machine learning algorithm called incremental import vector machines that is used for classification purposes. The classifier is specifically designed for the task of sequential learning, in which the data samples are successively presented to the classifier. The motivation for our work comes from the effort to formulate a classifier that can manage the major challenges of sequential learning problems, while being a powerful classifier in terms of classification accuracy, efficiency and meaningful output. One challenge of sequential learning is that data samples are not completely available to the learner at a given point of time and generally, waiting for a representative number of data is undesirable and impractical. Thus, in order to allow for a classification of given data samples at any time, the learning phase of the classifier model needs to start immediately, even if not all training samples are available. Another challenge is that the number of sequential arriving data samples can be very large or even infinite and thus, not all samples can be stored. Furthermore, the distribution of the sample can vary over time and the classifier model needs to remain stable and unchanged to irrelevant samples while being plastic to new, important samples. Therefore our key contribution is to develop, analyze and evaluate a powerful incremental learner for sequential learning which we call incremental import vector machines (I2VMs). The classifier is based on the batch machine learning algorithm import vector machines, which was developed by Zhu and Hastie (2005). I2VM is a kernel-based, discriminative classifier and thus, is able to deal with complex data distributions. Additionally ,the learner is sparse for an efficient training and testing and has a probabilistic output. A key achievement of this thesis is the verification and analysis of the discriminative and reconstructive model components of IVM and I2VM. While discriminative classifiers try to separate the classes as well as possible, classifiers with a reconstructive component aspire to have a high information content in order to approximate the distribution of the data samples. Both properties are necessary for a powerful incremental classifier. A further key achievement is the formulation of the incremental learning strategy of I2VM. The strategy deals with adding and removing data samples and the update of the current set of model parameters. Furthermore, also new classes and features can be incorporated. The learning strategy adapts the model continuously, while keeping it stable and efficient. In our experiments we use I2VM for the semantic segmentation of images from an image database, for large area land cover classification of overlapping remote sensing images and for object tracking in image sequences. We show that I2VM results in superior or competitive classification accuracies to comparable classifiers. A substantial achievement of the thesis is that I2VM's performance is independent of the ordering of the data samples and a reconsidering of already encountered samples for learning is not necessary. A further achievement is that I2VM is able to deal with very long data streams without a loss in the efficiency. Furthermore, as another achievement, we show that I2VM provide reliable posterior probabilities since samples with high class probabilities are accurately classified, whereas relatively low class probabilities are more likely referred to misclassified samples.},
    address = {Bonn},
    url = {https://hss.ulb.uni-bonn.de/2012/3009/3009.htm},
    }

  • R. Roscher, W. Förstner, and B. Waske, “I²VM: Incremental import vector machines,” Image and Vision Computing, vol. 30, iss. 4-5, p. 263–278, 2012. doi:10.1016/j.imavis.2012.04.004
    [BibTeX]

    We introduce an innovative incremental learner called incremental import vector machines (I²VM). The kernel-based discriminative approach is able to deal with complex data distributions. Additionally, the learner is sparse for an efficient training and testing and has a probabilistic output. We particularly investigate the reconstructive component of import vector machines, in order to use it for robust incremental learning. By performing incremental update steps, we are able to add and remove data samples, as well as update the current set of model parameters for incremental learning. By using various standard benchmarks, we demonstrate how I²VM is competitive or superior to other incremental methods. It is also shown that our approach is capable of managing concept-drifts in the data distributions. (C) 2012 Elsevier B.V. All rights reserved.

    @Article{roscher2012i2vm,
    title = {{I$^2$VM}: Incremental import vector machines},
    author = {Roscher, Ribana and F\"orstner, Wolfgang and Waske, Bj\"orn},
    journal = {Image and Vision Computing},
    year = {2012},
    month = may,
    number = {4-5},
    pages = {263--278},
    volume = {30},
    abstract = {We introduce an innovative incremental learner called incremental import vector machines (I$^2$VM). The kernel-based discriminative approach is able to deal with complex data distributions. Additionally, the learner is sparse for an efficient training and testing and has a probabilistic output. We particularly investigate the reconstructive component of import vector machines, in order to use it for robust incremental learning. By performing incremental update steps, we are able to add and remove data samples, as well as update the current set of model parameters for incremental learning. By using various standard benchmarks, we demonstrate how I$^2$VM is competitive or superior to other incremental methods. It is also shown that our approach is capable of managing concept-drifts in the data distributions. (C) 2012 Elsevier B.V. All rights reserved.},
    doi = {10.1016/j.imavis.2012.04.004},
    issn = {0262-8856},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • R. Roscher, J. Siegemund, F. Schindler, and W. Förstner, “Object Tracking by Segmentation Using Incremental Import Vector Machines,” Department of Photogrammetry, University of Bonn 2012.
    [BibTeX] [PDF]

    We propose a framework for object tracking in image sequences, following the concept of tracking-by-segmentation. The separation of object and background is achieved by a consecutive semantic superpixel segmentation of the images, yielding tight object boundaries. I.e., in the first image a model of the object’s characteristics is learned from an initial, incomplete annotation. This model is used to classify the superpixels of subsequent images to object and background employing graph-cut. We assume the object boundaries to be tight-fitting and the object motion within the image to be affine. To adapt the model to radiometric and geometric changes we utilize an incremental learner in a co-training scheme. We evaluate our tracking framework qualitatively and quantitatively on several image sequences.

    @techreport{roscher2012object,
    author      = {Roscher, Ribana and Siegemund, Jan and Schindler, Falko and F\"orstner, Wolfgang},
    title       = {Object Tracking by Segmentation Using Incremental Import Vector Machines},
    institution = {Department of Photogrammetry, University of Bonn},
    year        = {2012},
    city        = {Bonn},
    url         = {https://www.ipb.uni-bonn.de/pdfs/Roscher2012Object.pdf},
    abstract    = {We propose a framework for object tracking in image sequences, following the concept of tracking-by-segmentation. The separation of object and background is achieved by a consecutive semantic superpixel segmentation of the images, yielding tight object boundaries. I.e., in the first image a model of the object's characteristics is learned from an initial, incomplete annotation. This model is used to classify the superpixels of subsequent images to object and background employing graph-cut. We assume the object boundaries to be tight-fitting and the object motion within the image to be affine. To adapt the model to radiometric and geometric changes we utilize an incremental learner in a co-training scheme. We evaluate our tracking framework qualitatively and quantitatively on several image sequences.},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Evaluation of Import Vector Machines for Classifying Hyperspectral Data,” Department of Photogrammetry, University of Bonn 2012.
    [BibTeX] [PDF]

    We evaluate the performance of Import Vector Machines (IVM), a sparse Kernel Logistic Regression approach, for the classification of hyperspectral data. The IVM classifier is applied on two different data sets, using different number of training samples. The performance of IVM to Support Vector Machines (SVM) is compared in terms of accuracy and sparsity. Moreover, the impact of the training sample set on the accuracy and stability of IVM was investigated. The results underline that the IVM perform similar when compared to the popular SVM in terms of accuracy. Moreover, the number of import vectors from the IVM is significantly lower when compared to the number of support vectors from the SVM. Thus, the classification process of the IVM is faster. These findings are independent from the study site, the number of training samples and specific classes. Consequently, the proposed IVM approach is a promising classification method for hyperspectral imagery.

    @TechReport{roscher2012evaluation,
    title = {Evaluation of Import Vector Machines for Classifying Hyperspectral Data},
    author = {Roscher, Ribana and Waske, Bj\"orn and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2012},
    abstract = {We evaluate the performance of Import Vector Machines (IVM), a sparse Kernel Logistic Regression approach, for the classification of hyperspectral data. The IVM classifier is applied on two different data sets, using different number of training samples. The performance of IVM to Support Vector Machines (SVM) is compared in terms of accuracy and sparsity. Moreover, the impact of the training sample set on the accuracy and stability of IVM was investigated. The results underline that the IVM perform similar when compared to the popular SVM in terms of accuracy. Moreover, the number of import vectors from the IVM is significantly lower when compared to the number of support vectors from the SVM. Thus, the classification process of the IVM is faster. These findings are independent from the study site, the number of training samples and specific classes. Consequently, the proposed IVM approach is a promising classification method for hyperspectral imagery.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2012Evaluation.pdf},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Incremental Import Vector Machines for Classifying Hyperspectral Data,” IEEE Transactions on Geoscience and Remote Sensing, vol. 50, iss. 9, p. 3463–3473, 2012. doi:10.1109/TGRS.2012.2184292
    [BibTeX]

    In this paper, we propose an incremental learning strategy for import vector machines (IVM), which is a sparse kernel logistic regression approach. We use the procedure for the concept of self-training for sequential classification of hyperspectral data. The strategy comprises the inclusion of new training samples to increase the classification accuracy and the deletion of noninformative samples to be memory and runtime efficient. Moreover, we update the parameters in the incremental IVM model without retraining from scratch. Therefore, the incremental classifier is able to deal with large data sets. The performance of the IVM in comparison to support vector machines (SVM) is evaluated in terms of accuracy, and experiments are conducted to assess the potential of the probabilistic outputs of the IVM. Experimental results demonstrate that the IVM and SVM perform similar in terms of classification accuracy. However, the number of import vectors is significantly lower when compared to the number of support vectors, and thus, the computation time during classification can be decreased. Moreover, the probabilities provided by IVM are more reliable, when compared to the probabilistic information, derived from an SVM’s output. In addition, the proposed self-training strategy can increase the classification accuracy. Overall, the IVM and its incremental version is worthwhile for the classification of hyperspectral data.

    @article{roscher2012incremental,
    author    = {Roscher, Ribana and Waske, Bj\"orn and F\"orstner, Wolfgang},
    title     = {Incremental Import Vector Machines for Classifying Hyperspectral Data},
    journal   = {IEEE Transactions on Geoscience and Remote Sensing},
    year      = {2012},
    month     = sep,
    volume    = {50},
    number    = {9},
    pages     = {3463--3473},
    issn      = {0196-2892},
    doi       = {10.1109/TGRS.2012.2184292},
    owner     = {waske},
    timestamp = {2012.09.05},
    abstract  = {In this paper, we propose an incremental learning strategy for import vector machines (IVM), which is a sparse kernel logistic regression approach. We use the procedure for the concept of self-training for sequential classification of hyperspectral data. The strategy comprises the inclusion of new training samples to increase the classification accuracy and the deletion of noninformative samples to be memory and runtime efficient. Moreover, we update the parameters in the incremental IVM model without retraining from scratch. Therefore, the incremental classifier is able to deal with large data sets. The performance of the IVM in comparison to support vector machines (SVM) is evaluated in terms of accuracy, and experiments are conducted to assess the potential of the probabilistic outputs of the IVM. Experimental results demonstrate that the IVM and SVM perform similar in terms of classification accuracy. However, the number of import vectors is significantly lower when compared to the number of support vectors, and thus, the computation time during classification can be decreased. Moreover, the probabilities provided by IVM are more reliable, when compared to the probabilistic information, derived from an SVM's output. In addition, the proposed self-training strategy can increase the classification accuracy. Overall, the IVM and its incremental version is worthwhile for the classification of hyperspectral data.},
    }

  • F. Schindler and W. Förstner, “Real-time Camera Guidance for 3d Scene Reconstruction,” in ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, 2012.
    [BibTeX] [PDF]

    We propose a framework for multi-view stereo reconstruction exploiting the possibility to interactively guiding the operator during the image acquisition process. Multi-view stereo is a commonly used method to reconstruct both camera trajectory and 3D object shape. After determining an initial solution, a globally optimal reconstruction is usually obtained by executing a bundle adjustment involving all images. Acquiring suitable images, however, still requires an experienced operator to ensure accuracy and completeness of the final solution. We propose an interactive framework for guiding unexperienced users or possibly an autonomous robot. Using approximate camera orientations and object points we estimate point uncertainties within a sliding bundle adjustment and suggest appropriate camera movements. A visual feedback system communicates the decisions to the user in an intuitive way. We demonstrate the suitability of our system with a virtual image acquisition simulation as well as in real-world scenarios. We show that following the camera movements suggested by our system the final scene reconstruction with the automatically extracted key frames is both more complete and more accurate. Possible applications are non-professional 3D acquisition systems on low-cost platforms like mobile phones, autonomously navigating robots as well as online flight planning of unmanned aerial vehicles.

    @InProceedings{schindler2012real,
    title = {Real-time Camera Guidance for 3d Scene Reconstruction},
    author = {Falko Schindler and Wolfgang F\"orstner},
    booktitle = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2012},
    volume = {I-3},
    abstract = {We propose a framework for multi-view stereo reconstruction exploiting the possibility to interactively guiding the operator during the image acquisition process. Multi-view stereo is a commonly used method to reconstruct both camera trajectory and 3D object shape. After determining an initial solution, a globally optimal reconstruction is usually obtained by executing a bundle adjustment involving all images. Acquiring suitable images, however, still requires an experienced operator to ensure accuracy and completeness of the final solution. We propose an interactive framework for guiding unexperienced users or possibly an autonomous robot. Using approximate camera orientations and object points we estimate point uncertainties within a sliding bundle adjustment and suggest appropriate camera movements. A visual feedback system communicates the decisions to the user in an intuitive way. We demonstrate the suitability of our system with a virtual image acquisition simulation as well as in real-world scenarios. We show that following the camera movements suggested by our system the final scene reconstruction with the automatically extracted key frames is both more complete and more accurate. Possible applications are non-professional 3D acquisition systems on low-cost platforms like mobile phones, autonomously navigating robots as well as online flight planning of unmanned aerial vehicles.},
    keywords = {Three-dimensional Reconstruction, Bundle Adjustment, Camera Orientation, Real-time Planning},
    city = {Melbourne},
    doi = {10.5194/isprsannals-I-3-69-2012},
    url = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/I-3/69/2012/isprsannals-I-3-69-2012.pdf},
    }

  • J. Schneider, F. Schindler, T. Läbe, and W. Förstner, “Bundle Adjustment for Multi-camera Systems with Points at Infinity,” in ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences, 2012, p. 75–80. doi:10.5194/isprsannals-I-3-75-2012
    [BibTeX] [PDF]

    We present a novel approach for a rigorous bundle adjustment for omnidirectional and multi-view cameras, which enables an efficient maximum-likelihood estimation with image and scene points at infinity. Multi-camera systems are used to increase the resolution, to combine cameras with different spectral sensitivities (Z/I DMC, Vexcel Ultracam) or – like omnidirectional cameras – to augment the effective aperture angle (Blom Pictometry, Rollei Panoscan Mark III). Additionally multi-camera systems gain in importance for the acquisition of complex 3D structures. For stabilizing camera orientations – especially rotations – one should generally use points at the horizon over long periods of time within the bundle adjustment that classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points. Instead of eliminating the scale factor of the homogeneous vectors by Euclidean normalization, we normalize the homogeneous coordinates spherically. This way we can use images of omnidirectional cameras with single-view point like fisheye cameras and scene points, which are far away or at infinity. We demonstrate the feasibility and the potential of our approach on real data taken with a single camera, the stereo camera FinePix Real 3D W3 from Fujifilm and the multi-camera system Ladybug3 from Point Grey.

    @inproceedings{schneider12isprs,
    author    = {J. Schneider and F. Schindler and T. L\"abe and W. F\"orstner},
    title     = {Bundle Adjustment for Multi-camera Systems with Points at Infinity},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year      = {2012},
    volume    = {I-3},
    pages     = {75--80},
    city      = {Melbourne},
    doi       = {10.5194/isprsannals-I-3-75-2012},
    url       = {https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/I-3/75/2012/isprsannals-I-3-75-2012.pdf},
    abstract  = {We present a novel approach for a rigorous bundle adjustment for omnidirectional and multi-view cameras, which enables an efficient maximum-likelihood estimation with image and scene points at infinity. Multi-camera systems are used to increase the resolution, to combine cameras with different spectral sensitivities (Z/I DMC, Vexcel Ultracam) or - like omnidirectional cameras - to augment the effective aperture angle (Blom Pictometry, Rollei Panoscan Mark III). Additionally multi-camera systems gain in importance for the acquisition of complex 3D structures. For stabilizing camera orientations - especially rotations - one should generally use points at the horizon over long periods of time within the bundle adjustment that classical bundle adjustment programs are not capable of. We use a minimal representation of homogeneous coordinates for image and scene points. Instead of eliminating the scale factor of the homogeneous vectors by Euclidean normalization, we normalize the homogeneous coordinates spherically. This way we can use images of omnidirectional cameras with single-view point like fisheye cameras and scene points, which are far away or at infinity. We demonstrate the feasibility and the potential of our approach on real data taken with a single camera, the stereo camera FinePix Real 3D W3 from Fujifilm and the multi-camera system Ladybug3 from Point Grey.},
    }

  • L. Spinello, C. Stachniss, and W. Burgard, “Scene in the Loop: Towards Adaptation-by-Tracking in RGB-D Data,” in Proc. of the RSS Workshop RGB-D: Advanced Reasoning with Depth Cameras, 2012.
    [BibTeX] [PDF]
    [none]
    @inproceedings{spinello2012,
    author    = {L. Spinello and C. Stachniss and W. Burgard},
    title     = {Scene in the Loop: Towards Adaptation-by-Tracking in RGB-D Data},
    booktitle = {Proc. of the RSS Workshop RGB-D: Advanced Reasoning with Depth Cameras},
    year      = {2012},
    url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/spinello12rssws.pdf},
    abstract  = {[none]},
    timestamp = {2014.04.24},
    }

  • T. Stroth, “Kartierung landwirtschaftlicher Kulturarten mittels multitemporaler RapidEye und TerraSAR-X Daten,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2012.
    [BibTeX]

    none

    @MastersThesis{stroth2012kartierung,
    title = {Kartierung landwirtschaftlicher Kulturarten mittels multitemporaler RapidEye und TerraSAR-X Daten},
    author = {Stroth, Tobias},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2012},
    type = {Bachelor Thesis},
    abstract = {[none]},
    timestamp = {2013.04.15},
    }

  • B. Waske, J. Benediktsson, and J. Sveinsson, “Signal and Image Processing for Remote Sensing,” in Signal and Image Processing for Remote Sensing, Second Edition, 2nd ed., C. H. Chen, Ed., CRC Press, 2012, p. 365–374. doi:10.1201/b11656-21
    [BibTeX]

    Land cover classifications are perhaps the widest used application in context of remote sensing. The recent development of remote sensing systems, including numerous bands, high spatial resolution and increased repetition rates as well as the availability of more diverse remote sensing imagery increase the potential of remote sensing based land cover classifications. Nevertheless, recent data sets demand more sophisticated classifiers and the development of adequate methods in an ongoing research topic in the field of remote sensing. In this context the potential of the ensemble technique Random Forest (RF) for classifying hyperspectral and multisensor remote sensing data is demonstrated. The classification is done on two different data sets, comprising of (i) multispectral and SAR data and (ii) hyperspectral imagery. The results are compared to well known algorithms (e.g. Maximum Likelihood Classifier, Spectral Angle Mapper) as well as recent developments such as Support Vector Machines (SVM). Overall the results demonstrate that RF can be considered desirable for classification of hyperspectral as well as multisensor data sets. RF, significantly outperforms common methods in terms of accuracy and is comparable to SVM. RF achieve high accuracies, even with small training sample, and is simple to handle, because it mainly depends on two user-defined values.

    @InCollection{waske2012signal,
    title = {Random Forest Classification of Remote Sensing Data},
    author = {Waske, Bj\"orn and Benediktsson, Jon and Sveinsson, Johannes},
    booktitle = {Signal and Image Processing for Remote Sensing, Second Edition},
    editor = {Chen, Chi Hau},
    pages = {365--374},
    publisher = {CRC Press},
    year = {2012},
    edition = {2nd},
    month = feb,
    abstract = {Land cover classifications are perhaps the widest used application in context of remote sensing. The recent development of remote sensing systems, including numerous bands, high spatial resolution and increased repetition rates as well as the availability of more diverse remote sensing imagery increase the potential of remote sensing based land cover classifications. Nevertheless, recent data sets demand more sophisticated classifiers and the development of adequate methods in an ongoing research topic in the field of remote sensing. In this context the potential of the ensemble technique Random Forest (RF) for classifying hyperspectral and multisensor remote sensing data is demonstrated. The classification is done on two different data sets, comprising of (i) multispectral and SAR data and (ii) hyperspectral imagery. The results are compared to well known algorithms (e.g. Maximum Likelihood Classifier, Spectral Angle Mapper) as well as recent developments such as Support Vector Machines (SVM). Overall the results demonstrate that RF can be considered desirable for classification of hyperspectral as well as multisensor data sets. RF, significantly outperforms common methods in terms of accuracy and is comparable to SVM. RF achieve high accuracies, even with small training sample, and is simple to handle, because it mainly depends on two user-defined values.},
    doi = {10.1201/b11656-21},
    isbn = {978-1-4398-5596-6},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, S. van der Linden, C. Oldenburg, B. Jakimow, A. Rabe, and P. Hostert, “imageRF – A user-oriented implementation for remote sensing image analysis with Random Forests,” Environmental Modelling & Software, vol. 35, p. 192–193, 2012. doi:10.1016/j.envsoft.2012.01.014
    [BibTeX]

    An IDL implementation for the classification and regression analysis of remote sensing images with Random Forests is introduced. The tool, called imageRF, is platform and license independent and uses generic image file formats. It works well with default parameterization, yet all relevant parameters can be defined in intuitive GUIs. This makes it a user-friendly image processing tool, which is implemented as an add-on in the free EnMAP-Box and may be used in the commercial IDL/ENVI software. (C) 2012 Elsevier Ltd. All rights reserved.

    @Article{waske2012imagerf,
    title = {{imageRF} -- A user-oriented implementation for remote sensing image analysis with {Random Forests}},
    author = {Waske, Bj\"orn and van der Linden, Sebastian and Oldenburg, Carsten and Jakimow, Benjamin and Rabe, Andreas and Hostert, Patrick},
    journal = {Environmental Modelling \& Software},
    year = {2012},
    month = jul,
    pages = {192--193},
    volume = {35},
    abstract = {An IDL implementation for the classification and regression analysis of remote sensing images with Random Forests is introduced. The tool, called imageRF, is platform and license independent and uses generic image file formats. It works well with default parameterization, yet all relevant parameters can be defined in intuitive GUIs. This makes it a user-friendly image processing tool, which is implemented as an add-on in the free EnMAP-Box and may be used in the commercial IDL/ENVI software. (C) 2012 Elsevier Ltd. All rights reserved.},
    doi = {10.1016/j.envsoft.2012.01.014},
    issn = {1364-8152},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • S. Wenzel and W. Förstner, “Learning a compositional representation for facade object categorization,” in ISPRS Annals of Photogrammetry, Remote Sensing and the Spatial Information Sciences; Proc. of 22nd Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), 2012, p. 197–202. doi:10.5194/isprsannals-I-3-197-2012
    [BibTeX] [PDF]

    Our objective is the categorization of the most dominant objects in facade images, like windows, entrances and balconies. In order to execute an image interpretation of complex scenes we need an interaction between low level bottom-up feature detection and highlevel inference from top-down. A top-down approach would use results of a bottom-up detection step as evidence for some high-level inference of scene interpretation. We present a statistically founded object categorization procedure that is suited for bottom-up object detection. Instead of choosing a bag of features in advance and learning models based on these features, it is more natural to learn which features best describe the target object classes. Therefore we learn increasingly complex aggregates of line junctions in image sections from man-made scenes. We present a method for the classification of image sections by using the histogram of diverse types of line aggregates.

    @InProceedings{wenzel2012learning,
    title = {Learning a compositional representation for facade object categorization},
    author = {Wenzel, Susanne and F\"orstner, Wolfgang},
    booktitle = {ISPRS Annals of Photogrammetry, Remote Sensing and the Spatial Information Sciences; Proc. of 22nd Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2012},
    pages = {197--202},
    volume = {I-3},
    abstract = {Our objective is the categorization of the most dominant objects in facade images, like windows, entrances and balconies. In order to execute an image interpretation of complex scenes we need an interaction between low level bottom-up feature detection and highlevel inference from top-down. A top-down approach would use results of a bottom-up detection step as evidence for some high-level inference of scene interpretation. We present a statistically founded object categorization procedure that is suited for bottom-up object detection. Instead of choosing a bag of features in advance and learning models based on these features, it is more natural to learn which features best describe the target object classes. Therefore we learn increasingly complex aggregates of line junctions in image sections from man-made scenes. We present a method for the classification of image sections by using the histogram of diverse types of line aggregates.},
    city = {Melbourne},
    doi = {10.5194/isprsannals-I-3-197-2012},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2012Learning.pdf},
    }

  • Spatial Cognition VIII, C. Stachniss, K. Schill, and D. Uttal, Eds., Springer, 2012.
    [BibTeX]
    [none]
    @Book{stachniss2012a,
    title = {Spatial Cognition VIII},
    editor = {C. Stachniss and K. Schill and D. Uttal},
    publisher = {Springer},
    year = {2012},
    month = aug,
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

2011

  • S. Asadi, M. Reggente, C. Stachniss, C. Plagemann, and A. J. Lilienthal, “Intelligent Systems for Machine Olfaction: Tools and Methodologies,” , E. L. Hines and M. S. Leeson, Eds., IGI Global, 2011, pp. 153-179.
    [BibTeX]
    [none]
    @InCollection{asadi2011,
    title = {Statistical Gas Distribution Modelling using Kernel Methods},
    author = {S. Asadi and M. Reggente and C. Stachniss and C. Plagemann and A.J. Lilienthal},
    booktitle = {Intelligent Systems for Machine Olfaction: Tools and Methodologies},
    editor = {E.L. Hines and M.S. Leeson},
    pages = {153--179},
    publisher = {IGI Global},
    year = {2011},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • S. D. Bauer, F. Korč, and W. Förstner, “The potential of automatic methods of classification to identify leaf diseases from multispectral images,” Precision Agriculture, vol. 12, iss. 3, p. 361–377, 2011. doi:10.1007/s11119-011-9217-6
    [BibTeX] [PDF]

    Three methods of automatic classification of leaf diseases are described based on high-resolution multispectral stereo images. Leaf diseases are economically important as they can cause a loss of yield. Early and reliable detection of leaf diseases has important practical relevance, especially in the context of precision agriculture for localized treatment with fungicides. We took stereo images of single sugar beet leaves with two cameras (RGB and multispectral) in a laboratory under well controlled illumination conditions. The leaves were either healthy or infected with the leaf spot pathogen Cercospora beticola or the rust fungus Uromyces betae. To fuse information from the two sensors, we generated 3-D models of the leaves. We discuss the potential of two pixelwise methods of classification: k-nearest neighbour and an adaptive Bayes classification with minimum risk assuming a Gaussian mixture model. The medians of pixelwise classification rates achieved in our experiments are 91% for Cercospora beticola and 86% for Uromyces betae. In addition, we investigated the potential of contextual classification with the so called conditional random field method, which seemed to eliminate the typical errors of pixelwise classification.

    @article{bauer2011potential,
    author   = {Bauer, Sabine Daniela and Kor{\vc}, Filip and F\"orstner, Wolfgang},
    title    = {The potential of automatic methods of classification to identify leaf diseases from multispectral images},
    journal  = {Precision Agriculture},
    year     = {2011},
    volume   = {12},
    number   = {3},
    pages    = {361--377},
    doi      = {10.1007/s11119-011-9217-6},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Bauer2011potential.pdf},
    abstract = {Three methods of automatic classification of leaf diseases are described based on high-resolution multispectral stereo images. Leaf diseases are economically important as they can cause a loss of yield. Early and reliable detection of leaf diseases has important practical relevance, especially in the context of precision agriculture for localized treatment with fungicides. We took stereo images of single sugar beet leaves with two cameras (RGB and multispectral) in a laboratory under well controlled illumination conditions. The leaves were either healthy or infected with the leaf spot pathogen Cercospora beticola or the rust fungus Uromyces betae. To fuse information from the two sensors, we generated 3-D models of the leaves. We discuss the potential of two pixelwise methods of classification: k-nearest neighbour and an adaptive Bayes classification with minimum risk assuming a Gaussian mixture model. The medians of pixelwise classification rates achieved in our experiments are 91% for Cercospora beticola and 86% for Uromyces betae. In addition, we investigated the potential of contextual classification with the so called conditional random field method, which seemed to eliminate the typical errors of pixelwise classification.},
    }

  • J. Becker, C. Bersch, D. Pangercic, B. Pitzer, T. Rühr, B. Sankaran, J. Sturm, C. Stachniss, M. Beetz, and W. Burgard, “Mobile Manipulation of Kitchen Containers,” in Proc. of the IROS’11 Workshop on Results, Challenges and Lessons Learned in Advancing Robots with a Common Platform, San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{becker2011,
    title = {Mobile Manipulation of Kitchen Containers},
    author = {J. Becker and C. Bersch and D. Pangercic and B. Pitzer and T. R{\"u}hr and B. Sankaran and J. Sturm and C. Stachniss and M. Beetz and W. Burgard},
    booktitle = {Proc. of the IROS'11 Workshop on Results, Challenges and Lessons Learned in Advancing Robots with a Common Platform},
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/becker11irosws.pdf},
    }

  • M. Bennewitz, D. Maier, A. Hornung, and C. Stachniss, “Integrated Perception and Navigation in Complex Indoor Environments,” in Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS), 2011.
    [BibTeX]
    [none]
    @InProceedings{bennewitz2011,
    title = {Integrated Perception and Navigation in Complex Indoor Environments},
    author = {M. Bennewitz and D. Maier and A. Hornung and C. Stachniss},
    booktitle = {Proc. of the IEEE-RAS Int. Conf. on Humanoid Robots (HUMANOIDS)},
    year = {2011},
    note = {Invited presentation at the workshop on Humanoid service robot navigation in crowded and dynamic environments},
    timestamp = {2014.04.24},
    }

  • T. Dickscheid, F. Schindler, and W. Förstner, “Coding Images with Local Features,” International Journal of Computer Vision, vol. 94, iss. 2, p. 154–174, 2011. doi:10.1007/s11263-010-0340-z
    [BibTeX] [PDF]

    We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density p_H(x) based on local image statistics, and a feature coding density p_c(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between p_H(x) and p_c(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.

    @Article{dickscheid2011coding,
    title = {Coding Images with Local Features},
    author = {Dickscheid, Timo and Schindler, Falko and F{\"o}rstner, Wolfgang},
    journal = {International Journal of Computer Vision},
    year = {2011},
    volume = {94},
    number = {2},
    pages = {154--174},
    abstract = {We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density p_H(x) based on local image statistics, and a feature coding density p_c(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between p_H(x) and p_c(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.},
    doi = {10.1007/s11263-010-0340-z},
    issn = {0920-5691},
    publisher = {Springer Netherlands},
    url = {https://www.ipb.uni-bonn.de/pdfs/Dickscheid2011Coding.pdf},
    }

  • T. F. Dominicus, “Vergleich von Verfahren zur Rekonstruktion von Oberflächen,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2011.
    [BibTeX]

    \textbf{Summary} There is a growing demand for digital 3D-models in various disciplines. Dense point clouds are often the basis for these. These point clouds can be generated by a variety of different methods. One possible method is Stereo matching. There are different approaches to this. In this thesis, we examine three different Stereo matching Algorithms and compare their qualities with respect to accuracy, point density and point distribution. The used Algorithms are the Patch-based Multi-view stereo Software, the Semi-global Matching and the 3-Image Semi-global matching. In order to test these methods, we conduct two experiments. Each method is used to create dense point cloud, which we then compare to a reference cloud. The reference clouds are predetermined in the first Experiment and gathered with a Laser triangulation scanner in the second. The resulting point cloud is then analyzed. We predicted, that both SGM Algorithms perform better than the PMVS all examined characteristics. However, our experiments show that this is only true under certain conditions. While the point density and distribution is considerably higher in the first experiment, the accuracy is slightly lower compared to the PMVS. Both SGM methods show even worse results in the second experiment. Here, the density of the results of the SGM is lower and the distribution is slightly better. The accuracy of the SGM is on the same level as the PMVS. The 3-Image SGM only produced only a very sparse point cloud with a high number of outliers. We could not calculate an accuracy rating for this method. However, we assume that these findings are due to poor camera orientation in the second experiment. \textbf{Zusammenfassung} Der Bedarf an digitalen 3D-Modellen in verschiedenen Disziplinen nimmt stetig zu. Grundlage dafür sind oft Dichte Punktwolken. Diese Punktwolken können mit Hilfe verschiedener Verfahren erstellt werden. Eine Möglichkeit ist das Stereomatching. 
Dabei gibt es verschiedene Ansätze. In dieser Arbeit untersuchen wir drei verschiedene Stereomatching Algorithmen und vergleichen deren Eigenschaften in Bezug auf Genauigkeit, Punktdichte und Punktverteilung. Die verwendeten Verfahren sind die Multi-view stereo Software, das Semi-global Matching und das 3-Bild Semi-global matching. Um diese Verfahren zu untersuchen haben wir zwei Experimente durchgeführt. Wir verwenden jede dieser Methoden um eine dichte Punktwolke aus mehreren Bildern einer Szene zu erstellen. Diese Punktwolken vergleichen wir dann mit einer Referenzpunktwolke. Im ersten Experiment ist diese Referenz vorgegeben. Im zweiten Experiment erstellen wir diese Referenz, in dem wir die Szene mit einem Lasertriangulationsscanner erfassen . Wir hatten erwartet, dass die beiden SGM Algorithmen in allen drei Eigenschaften dem PMVS überlegen ist. Unsere Experimente zeigen jedoch, dass dies nur unter bestimmten Bedingungen der Fall ist. Während die Punktdichte im ersten Experiment beim SGM deutlich höher und die Punktverteilung besser ist, ist die Genauigkeit etwas geringer als die des PMVS. Beide SGM Verfahren bringen im zweiten Experiment noch schlechtere Ergebnisse. Die Punktdichte in den Punktwolken des SGM ist geringer und die Punktverteilung leicht besser. Die Genauigkeit des SGM ist leicht schlechter als die des PMVS. The 3-Image SGM only produced only a very sparse point cloud with a high number of outliers. We could not calculate an accuracy rating for this method. However, we assume that these findings are due to poor camera orientation in the second experiment. Das 3-Bild SGM berechnet hier nur eine sehr dünne Punktwolke mit einer hohen Zahl an Ausreißern. Wir konnten keine Punktwolke erstellen, bei der die Berechnung der Genauigkeit sinnvoll gewesen wäre. Wir vermuten jedoch, dass dies nicht am Algorithmus, sondern an einer schlechten Orientierung der Kameras im zweiten Experiment liegt.

    @MastersThesis{dominicus2011vergleich,
    title = {Vergleich von Verfahren zur Rekonstruktion von Oberfl{\"a}chen},
    author = {Dominicus, Tim Florian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F{\"o}rstner, Dipl.-Inf. Jan Siegemund},
    type = {bachelor thesis},
    abstract = {\textbf{Summary} There is a growing demand for digital 3D-models in various disciplines. Dense point clouds are often the basis for these. These point clouds can be generated by a variety of different methods. One possible method is Stereo matching. There are different approaches to this. In this thesis, we examine three different Stereo matching Algorithms and compare their qualities with respect to accuracy, point density and point distribution. The used Algorithms are the Patch-based Multi-view stereo Software, the Semi-global Matching and the 3-Image Semi-global matching. In order to test these methods, we conduct two experiments. Each method is used to create dense point cloud, which we then compare to a reference cloud. The reference clouds are predetermined in the first Experiment and gathered with a Laser triangulation scanner in the second. The resulting point cloud is then analyzed. We predicted, that both SGM Algorithms perform better than the PMVS all examined characteristics. However, our experiments show that this is only true under certain conditions. While the point density and distribution is considerably higher in the first experiment, the accuracy is slightly lower compared to the PMVS. Both SGM methods show even worse results in the second experiment. Here, the density of the results of the SGM is lower and the distribution is slightly better. The accuracy of the SGM is on the same level as the PMVS. The 3-Image SGM only produced only a very sparse point cloud with a high number of outliers. We could not calculate an accuracy rating for this method. However, we assume that these findings are due to poor camera orientation in the second experiment. \textbf{Zusammenfassung} Der Bedarf an digitalen 3D-Modellen in verschiedenen Disziplinen nimmt stetig zu. Grundlage daf\"ur sind oft Dichte Punktwolken. Diese Punktwolken k\"onnen mit Hilfe verschiedener Verfahren erstellt werden. Eine M\"oglichkeit ist das Stereomatching. 
Dabei gibt es verschiedene Ans\"atze. In dieser Arbeit untersuchen wir drei verschiedene Stereomatching Algorithmen und vergleichen deren Eigenschaften in Bezug auf Genauigkeit, Punktdichte und Punktverteilung. Die verwendeten Verfahren sind die Multi-view stereo Software, das Semi-global Matching und das 3-Bild Semi-global matching. Um diese Verfahren zu untersuchen haben wir zwei Experimente durchgef\"uhrt. Wir verwenden jede dieser Methoden um eine dichte Punktwolke aus mehreren Bildern einer Szene zu erstellen. Diese Punktwolken vergleichen wir dann mit einer Referenzpunktwolke. Im ersten Experiment ist diese Referenz vorgegeben. Im zweiten Experiment erstellen wir diese Referenz, in dem wir die Szene mit einem Lasertriangulationsscanner erfassen . Wir hatten erwartet, dass die beiden SGM Algorithmen in allen drei Eigenschaften dem PMVS \"uberlegen ist. Unsere Experimente zeigen jedoch, dass dies nur unter bestimmten Bedingungen der Fall ist. W\"ahrend die Punktdichte im ersten Experiment
    beim SGM deutlich h\"oher und die Punktverteilung besser ist, ist die Genauigkeit etwas geringer als die des PMVS. Beide SGM Verfahren bringen im zweiten Experiment noch schlechtere Ergebnisse. Die Punktdichte in den Punktwolken des SGM ist geringer und die Punktverteilung leicht besser. Die Genauigkeit des SGM ist leicht schlechter als die des PMVS. The 3-Image SGM only produced only a very sparse point cloud with a high number of outliers. We could not calculate an accuracy rating for this method. However, we assume that these findings are due to poor camera orientation in the second experiment. Das 3-Bild SGM berechnet hier nur eine sehr d\"unne Punktwolke mit einer hohen Zahl an Ausrei{\ss}ern. Wir konnten keine Punktwolke erstellen, bei der die Berechnung der Genauigkeit sinnvoll gewesen w\"are. Wir vermuten jedoch, dass dies nicht am Algorithmus, sondern an einer schlechten Orientierung der Kameras im zweiten Experiment liegt.},
    address = {Bonn},
    }

  • B. Frank, C. Stachniss, N. Abdo, and W. Burgard, “Using Gaussian Process Regression for Efficient Motion Planning in Environments with Deformable Objects,” in Proc. of the AAAI-11 Workshop on Automated Action Planning for Autonomous Mobile Robots (PAMR), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2011,
    title = {Using Gaussian Process Regression for Efficient Motion Planning in Environments with Deformable Objects},
    author = {B. Frank and C. Stachniss and N. Abdo and W. Burgard},
    booktitle = {Proc. of the AAAI-11 Workshop on Automated Action Planning for Autonomous Mobile Robots (PAMR)},
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank11pamr.pdf},
    }

  • B. Frank, C. Stachniss, N. Abdo, and W. Burgard, “Efficient Motion Planning for Manipulation Robots in Environments with Deformable Objects,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2011a,
    title = {Efficient Motion Planning for Manipulation Robots in Environments with Deformable Objects},
    author = {B. Frank and C. Stachniss and N. Abdo and W. Burgard},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank11iros.pdf},
    }

  • M. Hans and R. Roscher, “Zuordnen radiometrischer Informationen zu Laserscandaten von Weintrauben,” Department of Photogrammetry, University of Bonn, 2011.
    [BibTeX] [PDF]

    In diesem Report stellen wir zwei Verfahren vor, die radiometrische Informationen 3D-Scandaten zuordnen. Radiometrische Informationen unterstützen und verbessern die Anwendungen der Merkmalserfassung von Objekten, da sie weitere Kenntnisse über das gescannte Objekt liefern.

    @TechReport{hans2011zuordnen,
    title = {Zuordnen radiometrischer Informationen zu Laserscandaten von Weintrauben},
    author = {Hans, Mathias and Roscher, Ribana},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2011},
    abstract = {In diesem Report stellen wir zwei Verfahren vor, die radiometrische Informationen 3D-Scandaten zuordnen. Radiometrische Informationen unterst\"utzen und verbessern die Anwendungen der Merkmalserfassung von Objekten, da sie weitere Kenntnisse \"uber das gescannte Objekt liefern.},
    address = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Hans2011Zuordnen.pdf},
    }

  • R. Kümmerle, G. Grisetti, C. Stachniss, and W. Burgard, “Simultaneous Parameter Calibration, Localization, and Mapping for Robust Service Robotics,” in Proc. of the IEEE Workshop on Advanced Robotics and its Social Impacts, Half-Moon Bay, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kummerle2011,
    title = {Simultaneous Parameter Calibration, Localization, and Mapping for Robust Service Robotics},
    author = {R. K{\"u}mmerle and G. Grisetti and C. Stachniss and W. Burgard},
    booktitle = {Proc. of the IEEE Workshop on Advanced Robotics and its Social Impacts},
    year = {2011},
    address = {Half-Moon Bay, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kuemmerle11arso.pdf},
    }

  • S. Klemenjak and B. Waske, “Classifying Multilevel Segmented TerraSAR-X Data, using Support Vector Machines,” in 4th TerraSAR-X Science Team Meeting, 2011.
    [BibTeX] [PDF]

    To segment a image with strongly varying object sizes results generally in under-segmentation of small structures or over-segmentation of big ones, which consequences poor classification accuracies. A strategy to produce multiple segmentations of one image and classification with support vector machines (SVM) of this segmentation stack afterwards is shown.

    @InProceedings{klemenjak2011classifying,
    title = {Classifying Multilevel Segmented TerraSAR-X Data, using Support Vector Machines},
    author = {Klemenjak, Sascha and Waske, Bj{\"o}rn},
    booktitle = {4th TerraSAR-X Science Team Meeting},
    year = {2011},
    abstract = {To segment a image with strongly varying object sizes results generally in under-segmentation of small structures or over-segmentation of big ones, which consequences poor classification accuracies. A strategy to produce multiple segmentations of one image and classification with support vector machines (SVM) of this segmentation stack afterwards is shown.},
    owner = {waske},
    timestamp = {2012.09.05},
    url = {https://www.ipb.uni-bonn.de/pdfs/Klemenjak2011Classifying.pdf},
    }

  • H. Kretzschmar and C. Stachniss, “Pose Graph Compression for Laser-based SLAM,” in Proc. of the Intl. Symposium on Robotics Research (ISRR), Flagstaff, AZ, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kretzschmar2011a,
    title = {Pose Graph Compression for Laser-based {SLAM}},
    author = {H. Kretzschmar and C. Stachniss},
    booktitle = isrr,
    year = {2011},
    address = {Flagstaff, AZ, USA},
    note = {Invited presentation},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss11isrr.pdf},
    }

  • H. Kretzschmar, C. Stachniss, and G. Grisetti, “Efficient Information-Theoretic Graph Pruning for Graph-Based SLAM with Laser Range Finders,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kretzschmar2011,
    title = {Efficient Information-Theoretic Graph Pruning for Graph-Based {SLAM} with Laser Range Finders},
    author = {H. Kretzschmar and C. Stachniss and G. Grisetti},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kretzschmar11iros.pdf},
    }

  • B. Mack and B. Waske, “Optimizing support vector data description by automatically generated outliers,” in 7th Works. of the EARSeL Special Interest Group Imaging Spectroscopy, 2011.
    [BibTeX]
    [none]
    @InProceedings{mack2011optimizing,
    title = {Optimizing support vector data description by automatically generated outliers},
    author = {Mack, Benjamin and Waske, Bj{\"o}rn},
    booktitle = {7th Works. of the EARSeL Special Interest Group Imaging Spectroscopy},
    year = {2011},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • D. Maier, M. Bennewitz, and C. Stachniss, “Self-supervised Obstacle Detection for Humanoid Navigation Using Monocular Vision and Sparse Laser Data,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Shanghai, China, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{maier2011,
    title = {Self-supervised Obstacle Detection for Humanoid Navigation Using Monocular Vision and Sparse Laser Data},
    author = {D. Maier and M. Bennewitz and C. Stachniss},
    booktitle = icra,
    year = {2011},
    address = {Shanghai, China},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/maier11icra.pdf},
    }

  • R. Roscher, F. Schindler, and W. Förstner, “What would you look like in Springfield? Linear Transformations between High-Dimensional Spaces,” Department of Photogrammetry, University of Bonn, 2011.
    [BibTeX] [PDF]

    High-dimensional data structures occur in many fields of computer vision and machine learning. Transformation between two high-dimensional spaces usually involves the determination of a large amount of parameters and requires much labeled data to be given. There is much interest in reducing dimensionality if a lower-dimensional structure is underlying the data points. We present a procedure to enable the determination of a low-dimensional, projective transformation between two data sets, making use of state-of-the-art dimensional reduction algorithms. We evaluate multiple algorithms during several experiments with different objectives. We demonstrate the use of this procedure for applications like classification and assignments between two given data sets. Our procedure is semi-supervised due to the fact that all labeled and unlabeled points are used for the dimensionality reduction, but only few them have to be labeled. Using test data we evaluate the quantitative and qualitative performance of different algorithms with respect to the classification and assignment task. We show that with these algorithms and our transformation approach high-dimensional data sets can be related to each other. Finally we can use this procedure to match real world facial images with cartoon images from Springfield, home town of the famous Simpsons.

    @TechReport{roscher2011what,
    title = {What would you look like in Springfield? Linear Transformations between High-Dimensional Spaces},
    author = {Roscher, Ribana and Schindler, Falko and F{\"o}rstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2011},
    abstract = {High-dimensional data structures occur in many fields of computer vision and machine learning. Transformation between two high-dimensional spaces usually involves the determination of a large amount of parameters and requires much labeled data to be given. There is much interest in reducing dimensionality if a lower-dimensional structure is underlying the data points. We present a procedure to enable the determination of a low-dimensional, projective transformation between two data sets, making use of state-of-the-art dimensional reduction algorithms. We evaluate multiple algorithms during several experiments with different objectives. We demonstrate the use of this procedure for applications like classification and assignments between two given data sets. Our procedure is semi-supervised due to the fact that all labeled and unlabeled points are used for the dimensionality reduction, but only few them have to be labeled. Using test data we evaluate the quantitative and qualitative performance of different algorithms with respect to the classification and assignment task. We show that with these algorithms and our transformation approach high-dimensional data sets can be related to each other. Finally we can use this procedure to match real world facial images with cartoon images from Springfield, home town of the famous Simpsons.},
    address = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2011What.pdf},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Incremental import vector machines for large area land cover classification,” in IEEE International Conf. on Computer Vision Workshops (ICCV Workshops), 2011. doi:10.1109/ICCVW.2011.6130249
    [BibTeX]

    The classification of large areas consisting of multiple scenes is challenging regarding the handling of large and therefore mostly inhomogeneous data sets. Moreover, large data sets demand for computational efficient methods. We propose a method, which enables the efficient multi-class classification of large neighboring Landsat scenes. We use an incremental realization of the import vector machines, called I2VM, in combination with self-training to update an initial learned classifier with new training data acquired in the overlapping areas between neighboring Landsat scenes. We show in our experiments, that I2VM is a suitable classifier for large area land cover classification.

    @InProceedings{roscher2011incremental,
    title = {Incremental import vector machines for large area land cover classification},
    author = {Roscher, Ribana and Waske, Bj{\"o}rn and F{\"o}rstner, Wolfgang},
    booktitle = {{IEEE} International Conf. on Computer Vision Workshops (ICCV Workshops)},
    year = {2011},
    abstract = {The classification of large areas consisting of multiple scenes is challenging regarding the handling of large and therefore mostly inhomogeneous data sets. Moreover, large data sets demand for computational efficient methods. We propose a method, which enables the efficient multi-class classification of large neighboring Landsat scenes. We use an incremental realization of the import vector machines, called I2VM, in combination with self-training to update an initial learned classifier with new training data acquired in the overlapping areas between neighboring Landsat scenes. We show in our experiments, that I2VM is a suitable classifier for large area land cover classification.},
    doi = {10.1109/ICCVW.2011.6130249},
    keywords = {incremental import vector machines;inhomogeneous data sets;land cover classification;neighboring Landsat scenes;scenes classification;training data acquisition;data acquisition;geophysical image processing;image classification;natural scenes;support vector machines;terrain mapping;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • H. Sardemann, “Registrierung von Bildern mit 3D-Punktwolken,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2011.
    [BibTeX]
    [none]
    @MastersThesis{sardemann2011registrierung,
    title = {Registrierung von Bildern mit 3D-Punktwolken},
    author = {Sardemann, Hannes},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F{\"o}rstner, Dipl.-Ing. Falko Schindler},
    type = {bachelor thesis},
    address = {Bonn},
    }

  • F. Schindler and W. Förstner, “Fast Marching for Robust Surface Segmentation,” in LNCS, Photogrammetric Image Analysis, Munich, 2011, p. 147–158. doi:10.1007/978-3-642-24393-6
    [BibTeX] [PDF]

    We propose a surface segmentation method based on Fast Marching Farthest Point Sampling designed for noisy, visually reconstructed point clouds or laser range data. Adjusting the distance metric between neighboring vertices we obtain robust, edge-preserving segmentations based on local curvature. We formulate a cost function given a segmentation in terms of a description length to be minimized. An incremental-decremental segmentation procedure approximates a global optimum of the cost function and prevents from under- as well as strong over-segmentation. We demonstrate the proposed method on various synthetic and real-world data sets.

    @InProceedings{schindler2011fast,
    title = {Fast Marching for Robust Surface Segmentation},
    author = {Schindler, Falko and F{\"o}rstner, Wolfgang},
    booktitle = {LNCS, Photogrammetric Image Analysis},
    year = {2011},
    address = {Munich},
    note = {Volume Editors: Stilla, Uwe and Rottensteiner, Franz and Mayer, Helmut and Jutzi, Boris and Butenuth, Matthias},
    pages = {147--158},
    abstract = {We propose a surface segmentation method based on Fast Marching Farthest Point Sampling designed for noisy, visually reconstructed point clouds or laser range data. Adjusting the distance metric between neighboring vertices we obtain robust, edge-preserving segmentations based on local curvature. We formulate a cost function given a segmentation in terms of a description length to be minimized. An incremental-decremental segmentation procedure approximates a global optimum of the cost function and prevents from under- as well as strong over-segmentation. We demonstrate the proposed method on various synthetic and real-world data sets.},
    doi = {10.1007/978-3-642-24393-6},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schindler2011Fast.pdf},
    }

  • F. Schindler, W. Förstner, and J. Frahm, “Classification and Reconstruction of Surfaces from Point Clouds of Man-made Objects,” in International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment, Barcelona, 2011, p. 257–263. doi:10.1109/ICCVW.2011.6130251
    [BibTeX] [PDF]

    We present a novel surface model and reconstruction method for man-made environments that take prior knowledge about topology and geometry into account. The model favors but is not limited to horizontal and vertical planes that are pairwise orthogonal. The reconstruction method does not require one particular class of sensors, as long as a triangulated point cloud is available. It delivers a complete 3D segmentation, parametrization and classification for both surface regions and inter-plane relations. By working on a pre-segmentation we reduce the computational cost and increase robustness to noise and outliers. All reasoning is statistically motivated, based on a few decision variables with meaningful interpretation in measurement space. We demonstrate our reconstruction method for visual reconstructions and laser range data.

    @InProceedings{schindler2011classification,
    title = {Classification and Reconstruction of Surfaces from Point Clouds of Man-made Objects},
    author = {Schindler, Falko and F{\"o}rstner, Wolfgang and Frahm, Jan-Michael},
    booktitle = {International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment},
    year = {2011},
    address = {Barcelona},
    note = {Organizers: Schindler, Konrad and F{\"o}rstner, Wolfgang and Paparoditis, Nicolas},
    pages = {257--263},
    abstract = {We present a novel surface model and reconstruction method for man-made environments that take prior knowledge about topology and geometry into account. The model favors but is not limited to horizontal and vertical planes that are pairwise orthogonal. The reconstruction method does not require one particular class of sensors, as long as a triangulated point cloud is available. It delivers a complete 3D segmentation, parametrization and classification for both surface regions and inter-plane relations. By working on a pre-segmentation we reduce the computational cost and increase robustness to noise and outliers. All reasoning is statistically motivated, based on a few decision variables with meaningful interpretation in measurement space. We demonstrate our reconstruction method for visual reconstructions and laser range data.},
    doi = {10.1109/ICCVW.2011.6130251},
    proceeding = {ICCV Workshop on Computer Vision for Remote Sensing of the Environment},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schindler2011Classification.pdf},
    }

  • J. Schittenhelm, “Empirische Untersuchungen zum Einsatz des SFOP-Punktdetektors zur Objektdetektion,” Diploma thesis, University of Bonn, 2011.
    [BibTeX] [PDF]
    [none]
    @MastersThesis{schittenhelm2011empirische,
    title = {Empirische Untersuchungen zum Einsatz des SFOP-Punktdetektors zur Objektdetektion},
    author = {Schittenhelm, J\"org},
    school = {University of Bonn},
    year = {2011},
    type = {Diploma thesis},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schittenhelm2011Empirische.pdf},
    }

  • B. Schmeing, T. Läbe, and W. Förstner, “Trajectory Reconstruction Using Long Sequences of Digital Images From an Omnidirectional Camera,” in Proc. of the 31st DGPF Conf. (Jahrestagung), Mainz, 2011, pp. 443–452.
    [BibTeX] [PDF]

    We present a method to perform bundle adjustment using long sequences of digital images from an omnidirectional camera. We use the Ladybug3 camera from PointGrey, which consists of six individual cameras pointing in different directions. There is large overlap between successive images but only a few loop closures provide connections between distant camera positions. We face two challenges: (1) to perform a bundle adjustment with images of an omnidirectional camera and (2) implement outlier detection and estimation of initial parameters for the geometry described above. Our program combines the Ladybug's individual cameras to a single virtual camera and uses a spherical imaging model within the bundle adjustment, solving problem (1). Outlier detection (2) is done using bundle adjustments with small subsets of images followed by a robust adjustment of all images. Approximate values in our context are taken from an on-board inertial navigation system.

    @InProceedings{schmeing2011trajectory,
    title = {Trajectory Reconstruction Using Long Sequences of Digital Images From an Omnidirectional Camera},
    author = {Schmeing, Benno and L\"abe, Thomas and F\"orstner, Wolfgang},
    booktitle = {Proc. of the 31st DGPF Conf. (Jahrestagung)},
    year = {2011},
    address = {Mainz},
    pages = {443--452},
    abstract = {We present a method to perform bundle adjustment using long sequences of digital images from an omnidirectional camera. We use the Ladybug3 camera from PointGrey, which consists of six individual cameras pointing in different directions. There is large overlap between successive images but only a few loop closures provide connections between distant camera positions. We face two challenges: (1) to perform a bundle adjustment with images of an omnidirectional camera and (2) implement outlier detection and estimation of initial parameters for the geometry described above. Our program combines the Ladybug's individual cameras to a single virtual camera and uses a spherical imaging model within the bundle adjustment, solving problem (1). Outlier detection (2) is done using bundle adjustments with small subsets of images followed by a robust adjustment of all images. Approximate values in our context are taken from an on-board inertial navigation system.},
    city = {Mainz},
    proceeding = {Proc. of the 31st DGPF Conf. (Jahrestagung)},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schmeing2011Trajectory.pdf},
    }

  • J. Schneider, F. Schindler, and W. Förstner, “Bündelausgleichung für Multikamerasysteme,” in Proc. of the 31st DGPF Conf., 2011.
    [BibTeX] [PDF]

    Wir stellen einen Ansatz für eine strenge Bündelausgleichung für Multikamerasysteme vor. Hierzu verwenden wir eine minimale Repräsentation von homogenen Koordinatenvektoren für eine Maximum-Likelihood-Schätzung. Statt den Skalierungsfaktor von homogenen Vektoren durch Verwendung von euklidischen Grö\ssen zu eliminieren, werden die homogenen Koordinaten sphärisch normiert, so dass Bild- und Objektpunkte im Unendlichen repräsentierbar bleiben. Dies ermöglicht auch Bilder omnidirektionaler Kameras mit Einzelblickpunkt, wie Fisheyekameras, und weit entfernte bzw. unendlich ferne Punkte zu behandeln. Speziell Punkte am Horizont können über lange Zeiträume beobachtet werden und liefern somit eine stabile Richtungsinformation. Wir demonstrieren die praktische Umsetzung des Ansatzes anhand einer Bildfolge mit dem Multikamerasystem Ladybug3 von Point Grey, welches mit sechs Kameras 80 % der gesamten Sphäre abbildet.

    @InProceedings{schneider11dgpf,
    title = {B\"undelausgleichung f\"ur Multikamerasysteme},
    author = {J. Schneider and F. Schindler and W. F\"orstner},
    booktitle = {Proc. of the 31st DGPF Conf.},
    year = {2011},
    abstract = {Wir stellen einen Ansatz f\"ur eine strenge B\"undelausgleichung f\"ur Multikamerasysteme vor. Hierzu verwenden wir eine minimale Repr\"asentation von homogenen Koordinatenvektoren f\"ur eine Maximum-Likelihood-Sch\"atzung. Statt den Skalierungsfaktor von homogenen Vektoren durch Verwendung von euklidischen Gr\"o{\ss}en zu eliminieren, werden die homogenen Koordinaten sph\"arisch normiert, so dass Bild- und Objektpunkte im Unendlichen repr\"asentierbar bleiben. Dies erm\"oglicht auch Bilder omnidirektionaler Kameras mit Einzelblickpunkt, wie Fisheyekameras, und weit entfernte bzw. unendlich ferne Punkte zu behandeln. Speziell Punkte am Horizont k\"onnen \"uber lange Zeitr\"aume beobachtet werden und liefern somit eine stabile Richtungsinformation. Wir demonstrieren die praktische Umsetzung des Ansatzes anhand einer Bildfolge mit dem Multikamerasystem Ladybug3 von Point Grey, welches mit sechs Kameras 80 \% der gesamten Sph\"are abbildet.},
    address = {Mainz},
    url = {https://www.ipb.uni-bonn.de/pdfs/schneider11dgpf.pdf},
    }

  • S. Schoppohl, “Klassifikation von Multispektralen und Hyperspektralen Fernerkundungsdaten mittels sequentieller Klassifikationsverfahren,” Bachelor thesis, Institute of Photogrammetry, University of Bonn, 2011.
    [BibTeX]

    Geography, climate and vegetation – elements in today’s changing. These changes have to be observed and analyzed in detail. To assure being up-to-date the classification of image data is a common procedure in remote sensing. For the implementation of image data classification many classification methods were developed and modified over the past years. The classification methods, the image data and the study area mainly affect the classification accuracy. In particular the progress of increasing training data showed a boost of classification accuracy. Though the costs and expenditure of time are very high in purchasing such training data. Nevertheless so called semi-supervised classification methods try to resolve this problem. In this bachelor thesis the focus is set on the Random Forest developed by Breiman. This classifier is combined with an incremental method. After this the classifier is able to generate new training data. Hence we implement the self-training method. To create an incremental Random Forest we proceed in several phases. First we train a conventional Random Forest with a small set of training data. In a second Phase the predicted classification is made. This allows pixel whose land use classes are unknown to be provided with pseudo-classes. At the same time the accuracy assessment is made on the trained Random Forest. For this we use the predefined test data from the given dataset. In a third stage the selection of the new training data is made. We define a threshold, so the new training data is not randomly selected. The confidence level of the new training data is measured on this threshold. If there is a sufficient number of new training data, which reach or exceed this confidence level, the new training data is added to the existing training data. On this basis a new Random Forest can be trained. This sequential process is determined by a specified iteration, or is stopped prematurely by a stopping criterion. 
Afterwards it is possible to classify a multi-spectral and hyperspectral dataset. The assessment concluded that the combination parameters of the incremental Random Forest have a crucial impact on the classification results. Depending on the data set various configurations of parameters have to be tested. While comparing the conventional Random Forest with the incremental Random Forest partly significant differences in the classification results are obvious. Furthermore it should be noted that only a few class accuracy could be increased with the incremental Random Forest. Though the present thesis provides a good foundation to exploit the potential of the incremental Random Forest for further investigations.

    @MastersThesis{schoppohl2011klassifikation,
    title = {Klassifikation von Multispektralen und Hyperspektralen Fernerkundungsdaten mittels sequentieller Klassifikationsverfahren},
    author = {Schoppohl, Sebastian-Alexander},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    note = {Betreuung: Prof. Dr. Bj\"orn Waske, Dipl.-Ing. Ribana Roscher},
    type = {Bachelor thesis},
    abstract = {Geography, climate and vegetation - elements in today's changing. These changes have to be observed and analyzed in detail. To assure being up-to-date the classification of image data is a common procedure in remote sensing. For the implementation of image data classification many classification methods were developed and modified over the past years. The classification methods, the image data and the study area mainly affect the classification accuracy. In particular the progress of increasing training data showed a boost of classification accuracy. Though the costs and expenditure of time are very high in purchasing such training data. Nevertheless so called semi-supervised classification methods try to resolve this problem. In this bachelor thesis the focus is set on the Random Forest developed by Breiman. This classifier is combined with an incremental method. After this the classifier is able to generate new training data. Hence we implement the self-training method. To create an incremental Random Forest we proceed in several phases. First we train a conventional Random Forest with a small set of training data. In a second Phase the predicted classification is made. This allows pixel whose land use classes are unknown to be provided with pseudo-classes. At the same time the accuracy assessment is made on the trained Random Forest. For this we use the predefined test data from the given dataset. In a third stage the selection of the new training data is made. We define a threshold, so the new training data is not randomly selected. The confidence level of the new training data is measured on this threshold. If there is a sufficient number of new training data, which reach or exceed this confidence level, the new training data is added to the existing training data. On this basis a new Random Forest can be trained. This sequential process is determined by a specified iteration, or is stopped prematurely by a stopping criterion. 
Afterwards it is possible to classify a multi-spectral and hyperspectral dataset. The assessment concluded that the combination parameters of the incremental Random Forest have a crucial impact on the classification results. Depending on the data set various configurations of parameters have to be tested. While comparing the conventional Random Forest with the incremental Random Forest partly significant differences in the classification results are obvious. Furthermore it should be noted that only a few class accuracy could be increased with the incremental Random Forest. Though the present thesis provides a good foundation to exploit the potential of the incremental Random Forest for further investigations.},
    city = {Bonn},
    }

  • J. Siegemund, U. Franke, and W. Förstner, “A Temporal Filter Approach for Detection and Reconstruction of Curbs and Road Surfaces based on Conditional Random Fields,” in IEEE Intelligent Vehicles Symposium (IV), 2011, pp. 637-642. doi:10.1109/IVS.2011.5940447
    [BibTeX] [PDF]

    A temporal filter approach for real-time detection and reconstruction of curbs and road surfaces from 3D point clouds is presented. Instead of local thresholding, as used in many other approaches, a 3D curb model is extracted from the point cloud. The 3D points are classified to different parts of the model (i.e. road and sidewalk) using a temporally integrated Conditional Random Field (CRF). The parameters of curb and road surface are then estimated from the respectively assigned points, providing a temporal connection via a Kalman filter. In this contribution, we employ dense stereo vision for data acquisition. Other sensors capturing point cloud data, e.g. lidar, would also be suitable. The system was tested on real-world scenarios, showing the advantages over a temporally unfiltered version, due to robustness, accuracy and computation time. Further, the lateral accuracy of the system is evaluated. The experiments show the system to yield highly accurate results, for curved and straight-line curbs, up to distances of 20 meters from the camera.

    @InProceedings{siegemund2011temporal,
    title = {A Temporal Filter Approach for Detection and Reconstruction of Curbs and Road Surfaces based on Conditional Random Fields},
    author = {Siegemund, Jan and Franke, Uwe and F\"orstner, Wolfgang},
    booktitle = {IEEE Intelligent Vehicles Symposium (IV)},
    year = {2011},
    month = jun,
    pages = {637--642},
    publisher = {IEEE Computer Society},
    abstract = {A temporal filter approach for real-time detection and reconstruction of curbs and road surfaces from 3D point clouds is presented. Instead of local thresholding, as used in many other approaches, a 3D curb model is extracted from the point cloud. The 3D points are classified to different parts of the model (i.e. road and sidewalk) using a temporally integrated Conditional Random Field (CRF). The parameters of curb and road surface are then estimated from the respectively assigned points, providing a temporal connection via a Kalman filter. In this contribution, we employ dense stereo vision for data acquisition. Other sensors capturing point cloud data, e.g. lidar, would also be suitable. The system was tested on real-world scenarios, showing the advantages over a temporally unfiltered version, due to robustness, accuracy and computation time. Further, the lateral accuracy of the system is evaluated. The experiments show the system to yield highly accurate results, for curved and straight-line curbs, up to distances of 20 meters from the camera.},
    doi = {10.1109/IVS.2011.5940447},
    url = {https://www.ipb.uni-bonn.de/pdfs/Siegemund2011Temporal.pdf},
    }

  • J. Sturm, C. Stachniss, and W. Burgard, “A Probabilistic Framework for Learning Kinematic Models of Articulated Objects,” Journal of Artificial Intelligence Research (JAIR), vol. 41, pp. 477–526, 2011.
    [BibTeX] [PDF]
    [none]
    @Article{sturm2011,
    title = {A Probabilistic Framework for Learning Kinematic Models of Articulated Objects},
    author = {J. Sturm and C. Stachniss and W. Burgard},
    journal = jair,
    year = {2011},
    pages = {477--526},
    volume = {41},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm11jair.pdf},
    }

  • B. Uebbing, “Untersuchung zur Nutzung wiederholter Strukturen für die 3D Rekonstruktion aus Einzelaufnahmen,” Bachelor thesis, Institute of Photogrammetry, University of Bonn, 2011.
    [BibTeX]

    \textbf{Summary} The goal of this work is the derivation of 3D-information from single images. Therefore identical repeated structures are used. These structures are common in man-made scenes. The repeated structures can be seen as multiple pictures of a single object. At first we simplify the problem by projecting it from 3D to 2D. Thus we introduce 1D cameras by taking the rows and columns of the image sections showing the repeated structures. By rectifying the image we can assume the normal case. By reconstructing and intersecting the projection rays of corresponding points from three 1D cameras 2D profiles of the repeated structure can be recovered. Using these profiles we can derive depth information and their uncertainty. By combining more than one profile in horizontal and vertical direction even a 3D model of the repeated structure can be recovered. We pursue this approach in two ways. First we discuss a simulation program which applies the developed concept under optimal circumstances. Furthermore we verify our estimate of the theoretical uncertainty by performing an empirical test. Second we test our approach on real images. Therefore we use images of building facades in which we use geometrically identical windows as repeated objects. In this process edge-feature extraction and matching of these features plays a major role with real images. We examine our results and conclude that our approach performs very well in the theoretical environment of the simulation program. There it is possible to create 2D profiles with a relative uncertainty of depth of 0.04% to 2%, depending on the assumption of the theoretical uncertainty. Also the reconstruction of 3D information of the used model in the simulation performs very well. The results on real images lack in completeness and precision caused by uncertainties during the edgefeature extraction and the following matching of the 1D edgepoints. The results are not very reliable and meaningful. 
This is mostly due to the relatively small depth of the repeated structures. Mostly, just horizontal 2D profiles can be recovered, because there are not three identical windows on top of each other. Other major sources of uncertainties are incidences of light, radial image distortions and disturbing objects behind the windows or reflections of objects. Our approach is therefore only of limited use on the images used by us. To produce good results with our approach we require certain circumstances like a high resolution image, so the repeated structures are also displayed in a high resolution. Furthermore the repeated objects should have a certain amount of depth, so the parallax is significant. \textbf{Zusammenfassung} Ziel dieser Arbeit ist die Ableitung von 3D-Informationen aus Einzelaufnahmen. Dazu werden identische, wiederholte Strukturen verwendet. Diese treten in von Menschenhand geschaffenen Objekten sehr häufig auf. Wir betrachten diese wiederholten Strukturen als mehrere Aufnahmen eines Objektes. Zunächst vereinfachen wir die Problemstellung, indem wir die 3D Rekonstruktion von Punkten und Linien auf eine 2D Rekonstruktion von Punkten reduzieren. Dazu werden 1D Kameras eingeführt. Die Zeilen und Spalten von Bildausschnitten wiederholter Objekte werden dabei als Aufnahmen von 1D Kameras betrachtet. Aufgrund der Rektifizierung der Bilder können wir das Vorliegen des Normalfalls annehmen. Durch Rekonstruktion und Verschneiden der Abbildungsstrahlen von korrespondierenden Punkten aus drei 1D Kameras werden 2D Profile rekonstruiert. Aus diesen lassen sich Tiefeninformationen und deren Genauigkeit ableiten. Durch Kombination mehrerer Profile in horizontaler und vertikaler Richtung lassen sich unter optimalen Bedingungen 3D Modelle der wiederholten Strukturen erstellen. Wir verfolgen diesen Ansatz auf zwei Wegen. Zunächst wird ein Simulationsprogramm behandelt, welches das entwickelte Konzept an einem Modell unter optimalen Bedingungen testet. 
Dabei wird zudem die Annahme der theoretischen Genauigkeit empirisch überprüft. In einem nächsten Schritt wird der Ansatz für die Anwendung auf echte Bilder übertragen. Dazu verwenden wir Aufnahmen von Gebäudefassaden, bei denen wir geometrisch identische Fenster als wiederholte Strukturen betrachten. Dabei spielen besonders Aspekte wie Kantenextraktion und eine korrekte Zuordnung korrespondierender Kanten eine Rolle. Letztendlich stellen wir fest, dass der von uns verfolgte Ansatz in der Theorie des Simulationsprogramms sehr gute Ergebnisse liefert. Es ist möglich 2D Profile mit einer relativen Tiefengenauigkeit von 0.04% bis 2%, je nach Annahme der theoretischen Genauigkeit, zu erstellen. Die Rekonstruktion der 3D Informationen des im Simulationsprogramm verwendeten Modells gelingt sehr gut. Die Anwendung auf echte Bilder liefert weniger gute Resultate. Durch Ungenauigkeiten in der Kantenextraktion und der Zuordnung am Rand der wiederholten Strukturen und einer zu geringen Tiefe der verwendeten Testobjekte sind die Ergebnisse nicht sehr akkurat und aussagekräftig. In der Regel werden nur horizontale 2D Profile erstellt, da meist nicht drei identische Fensterstrukturen übereinander liegen. Zudem spielen weitere Faktoren wie Lichteinfall, Verzeichnungen und Störobjekte in den von uns verwendeten Fenstern eine Rolle. Unser entwickeltes Verfahren lässt sich daher nur bedingt zur Rekonstruktion auf den von uns verwendeten Bildern benutzen.

    @MastersThesis{uebbing2011untersuchung,
    title = {Untersuchung zur Nutzung wiederholter Strukturen f\"ur die 3D Rekonstruktion aus Einzelaufnahmen},
    author = {Uebbing, Bernd},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    type = {Bachelor thesis},
    abstract = {\textbf{Summary} The goal of this work is the derivation of 3D-information from single images. Therefore identical repeated structures are used. These structures are common in man-made scenes. The repeated structures can be seen as multiple pictures of a single object. At first we simplify the problem by projecting it from 3D to 2D. Thus we introduce 1D cameras by taking the rows and columns of the image sections showing the repeated structures. By rectifying the image we can assume the normal case. By reconstructing and intersecting the projection rays of corresponding points from three 1D cameras 2D profiles of the repeated structure can be recovered. Using these profiles we can derive depth information and their uncertainty. By combining more than one profile in horizontal and vertical direction even a 3D model of the repeated structure can be recovered. We pursue this approach in two ways. First we discuss a simulation program which applies the developed concept under optimal circumstances. Furthermore we verify our estimate of the theoretical uncertainty by performing an empirical test. Second we test our approach on real images. Therefore we use images of building facades in which we use geometrically identical windows as repeated objects. In this process edge-feature extraction and matching of these features plays a major role with real images. We examine our results and conclude that our approach performs very well in the theoretical environment of the simulation program. There it is possible to create 2D profiles with a relative uncertainty of depth of 0.04\% to 2\%, depending on the assumption of the theoretical uncertainty. Also the reconstruction of 3D information of the used model in the simulation performs very well. The results on real images lack in completeness and precision caused by uncertainties during the edgefeature extraction and the following matching of the 1D edgepoints. The results are not very reliable and meaningful. 
This is mostly due to the relatively small depth of the repeated structures. Mostly, just horizontal 2D profiles can be recovered, because there are not three identical windows on top of each other. Other major sources of uncertainties are incidences of light, radial image distortions and disturbing objects behind the windows or reflections of objects. Our approach is therefore only of limited use on the images used by us. To produce good results with our approach we require certain circumstances like a high resolution image, so the repeated structures are also displayed in a high resolution. Furthermore the repeated objects should have a certain amount of depth, so the parallax is significant. \textbf{Zusammenfassung} Ziel dieser Arbeit ist die Ableitung von 3D-Informationen aus Einzelaufnahmen. Dazu werden identische, wiederholte Strukturen verwendet. Diese treten in von Menschenhand geschaffenen Objekten sehr h\"aufig auf. Wir betrachten diese wiederholten Strukturen als mehrere
    Aufnahmen eines Objektes. Zun\"achst vereinfachen wir die Problemstellung, indem wir die 3D Rekonstruktion von Punkten und Linien auf eine 2D Rekonstruktion von Punkten reduzieren. Dazu werden 1D Kameras eingef\"uhrt. Die Zeilen und Spalten von Bildausschnitten wiederholter Objekte werden dabei als Aufnahmen von 1D Kameras betrachtet. Aufgrund der Rektifizierung der Bilder k\"onnen wir das Vorliegen des Normalfalls annehmen. Durch Rekonstruktion und Verschneiden der Abbildungsstrahlen von korrespondierenden Punkten aus drei 1D Kameras werden 2D Profile rekonstruiert. Aus diesen lassen sich Tiefeninformationen und deren Genauigkeit ableiten. Durch Kombination mehrerer Profile in horizontaler und vertikaler Richtung lassen sich unter optimalen Bedingungen 3D Modelle der wiederholten Strukturen erstellen. Wir verfolgen diesen Ansatz auf zwei Wegen. Zun\"achst wird ein Simulationsprogramm behandelt, welches das entwickelte Konzept an einem Modell unter optimalen Bedingungen testet. Dabei wird zudem die Annahme der theoretischen Genauigkeit empirisch \"uberpr\"uft. In einem n\"achsten Schritt wird der Ansatz f\"ur die Anwendung auf echte Bilder \"ubertragen. Dazu verwenden wir Aufnahmen von Geb\"audefassaden, bei denen wir geometrisch identische Fenster als wiederholte Strukturen betrachten. Dabei spielen besonders Aspekte wie Kantenextraktion und eine korrekte Zuordnung korrespondierender Kanten eine Rolle. Letztendlich stellen wir fest, dass der von uns verfolgte Ansatz in der Theorie des Simulationsprogramms sehr gute Ergebnisse liefert. Es ist m\"oglich 2D Profile mit einer relativen Tiefengenauigkeit von 0.04\% bis 2\%, je nach Annahme der theoretischen Genauigkeit, zu erstellen. Die Rekonstruktion der 3D Informationen des im Simulationsprogramm verwendeten Modells gelingt sehr gut. Die Anwendung auf echte Bilder liefert weniger gute Resultate. 
Durch Ungenauigkeiten in der Kantenextraktion und der Zuordnung am Rand der wiederholten Strukturen und einer zu geringen Tiefe der verwendeten Testobjekte sind die Ergebnisse nicht sehr akkurat und aussagekr\"aftig. In der Regel werden nur horizontale 2D Profile erstellt, da meist nicht drei identische Fensterstrukturen \"ubereinander liegen. Zudem spielen weitere Faktoren wie Lichteinfall, Verzeichnungen und St\"orobjekte in den von uns verwendeten Fenstern eine Rolle. Unser entwickeltes Verfahren l\"asst sich daher nur bedingt zur Rekonstruktion auf den von uns verwendeten Bildern benutzen.},
    city = {Bonn},
    }

  • B. Waske, R. Roscher, and S. Klemenjak, “Import Vector Machines Based Classification of Multisensor Remote Sensing Data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2011. doi:10.1109/IGARSS.2011.6049829
    [BibTeX]

    The classification of multisensor data sets, consisting of multitemporal SAR data and multispectral is addressed. In the present study, Import Vector Machines (IVM) are applied on two data sets, consisting of (i) Envisat ASAR/ERS-2 SAR data and a Landsat 5 TM scene, and (ii) TerraSAR-X data and a RapidEye scene. The performance of IVM for classifying multisensor data is evaluated and the method is compared to Support Vector Machines (SVM) in terms of accuracy and complexity. In general, the experimental results demonstrate that the classification accuracy is improved by the multisensor data set. Moreover, IVM and SVM perform similar in terms of the classification accuracy. However, the number of import vectors is considerably less than the number of support vectors, and thus the computation time of the IVM classification is lower. IVM can directly be applied to the multi-class problems and provide probabilistic outputs. Overall IVM constitutes a feasible method and alternative to SVM.

    @InProceedings{waske2011import,
    title = {Import Vector Machines Based Classification of Multisensor Remote Sensing Data},
    author = {Waske, Bj\"orn and Roscher, Ribana and Klemenjak, Sascha},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2011},
    abstract = {The classification of multisensor data sets, consisting of multitemporal SAR data and multispectral is addressed. In the present study, Import Vector Machines (IVM) are applied on two data sets, consisting of (i) Envisat ASAR/ERS-2 SAR data and a Landsat 5 TM scene, and (ii) TerraSAR-X data and a RapidEye scene. The performance of IVM for classifying multisensor data is evaluated and the method is compared to Support Vector Machines (SVM) in terms of accuracy and complexity. In general, the experimental results demonstrate that the classification accuracy is improved by the multisensor data set. Moreover, IVM and SVM perform similar in terms of the classification accuracy. However, the number of import vectors is considerably less than the number of support vectors, and thus the computation time of the IVM classification is lower. IVM can directly be applied to the multi-class problems and provide probabilistic outputs. Overall IVM constitutes a feasible method and alternative to SVM.},
    doi = {10.1109/IGARSS.2011.6049829},
    keywords = {Envisat ASAR ERS-2 SAR data;IVM;Landsat 5 TM scene;RapidEye scene;SVM comparison;TerraSAR-X data;computation time;data classification;import vector machines;multisensor remote sensing data;multispectral data;multitemporal SAR data;support vector machines;geophysical image processing;image classification;knowledge engineering;radar imaging;remote sensing by radar;spaceborne radar;synthetic aperture radar;},
    }

  • K. M. Wurm, D. Hennes, D. Holz, R. B. Rusu, C. Stachniss, K. Konolige, and W. Burgard, “Hierarchies of Octrees for Efficient 3D Mapping,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2011,
    title = {Hierarchies of Octrees for Efficient 3D Mapping},
    author = {K.M. Wurm and D. Hennes and D. Holz and R.B. Rusu and C. Stachniss and K. Konolige and W. Burgard},
    booktitle = iros,
    year = {2011},
    address = {San Francisco, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm11iros.pdf},
    }

  • M. Y. Yang, “Hierarchical and Spatial Structures for Interpreting Images of Man-made Scenes Using Graphical Models,” PhD Thesis, 2011.
    [BibTeX] [PDF]

    \textbf{Summary} The task of semantic scene interpretation is to label the regions of an image and their relations into meaningful classes. Such task is a key ingredient to many computer vision applications, including object recognition, 3D reconstruction and robotic perception. It is challenging partially due to the ambiguities inherent to the image data. The images of man-made scenes, e. g. the building facade images, exhibit strong contextual dependencies in the form of the spatial and hierarchical structures. Modelling these structures is central for such interpretation task. Graphical models provide a consistent framework for the statistical modelling. Bayesian networks and random fields are two popular types of the graphical models, which are frequently used for capturing such contextual information. The motivation for our work comes from the belief that we can find a generic formulation for scene interpretation that having both the benefits from random fields and Bayesian networks. It should have clear semantic interpretability. Therefore our key contribution is the development of a generic statistical graphical model for scene interpretation, which seamlessly integrates different types of the image features, and the spatial structural information and the hierarchical structural information defined over the multi-scale image segmentation. It unifies the ideas of existing approaches, e. g. conditional random field (CRF) and Bayesian network (BN), which has a clear statistical interpretation as the maximum a posteriori (MAP) estimate of a multi-class labelling problem. Given the graphical model structure, we derive the probability distribution of the model based on the factorization property implied in the model structure. The statistical model leads to an energy function that can be optimized approximately by either loopy belief propagation or graph cut based move making algorithm. 
The particular type of the features, the spatial structure, and the hierarchical structure however is not prescribed. In the experiments, we concentrate on terrestrial man-made scenes as a specifically difficult problem. We demonstrate the application of the proposed graphical model on the task of multi-class classification of building facade image regions. The framework for scene interpretation allows for significantly better classification results than the standard classical local classification approach on man-made scenes by incorporating the spatial and hierarchical structures. We investigate the performance of the algorithms on a public dataset to show the relative importance ofthe information from the spatial structure and the hierarchical structure. As a baseline for the region classification, we use an efficient randomized decision forest classifier. Two specific models are derived from the proposed graphical model, namely the hierarchical CRF and the hierarchical mixed graphical model. We show that these two models produce better classification results than both the baseline region classifier and the flat CRF. \textbf{Zusammenfassung} Ziel der semantischen Bildinterpretation ist es, Bildregionen und ihre gegenseitigen Beziehungen zu kennzeichnen und in sinnvolle Klassen einzuteilen. Dies ist eine der Hauptaufgabe in vielen Bereichen des maschinellen Sehens, wie zum Beispiel der Objekterkennung, 3D Rekonstruktion oder der Wahrnehmung von Robotern. Insbesondere Bilder anthropogener Szenen, wie z.B. Fassadenaufnahmen, sind durch starke räumliche und hierarchische Strukturen gekennzeichnet. Diese Strukturen zu modellieren ist zentrale Teil der Interpretation, für deren statistische Modellierung graphische Modelle ein geeignetes konsistentes Werkzeug darstellen. Bayes Netze und Zufallsfelder sind zwei bekannte und häufig genutzte Beispiele für graphische Modelle zur Erfassung kontextabhängiger Informationen. 
Die Motivation dieser Arbeit liegt in der Überzeugung, dass wir eine generische Formulierung der Bildinterpretation mit klarer semantischer Bedeutung finden können, die die Vorteile von Bayes Netzen und Zufallsfeldern verbindet. Der Hauptbeitrag der vorliegenden Arbeit liegt daher in der Entwicklung eines generischen statistischen graphischen Modells zur Bildinterpretation, welches unterschiedlichste Typen von Bildmerkmalen und die räumlichen sowie hierarchischen Strukturinformationen über eine multiskalen Bildsegmentierung integriert. Das Modell vereinheitlicht die existierender Arbeiten zugrunde liegenden Ideen, wie bedingter Zufallsfelder (conditional random field (CRF)) und Bayesnetze (Bayesian network (BN)). Dieses Modell hat eine klare statistische Interpretation als Maximum a posteriori (MAP) Schätzer eines mehr klassen Zuordnungsproblems. Gegeben die Struktur des graphischen Modells und den dadurch definierten Faktorisierungseigenschaften leiten wir die Wahrscheinlichkeitsverteilung des Modells ab. Dies führt zu einer Energiefunktion, die näherungsweise optimiert werden kann. Der jeweilige Typ der Bildmerkmale, die räumliche sowie hierarchische Struktur ist von dieser Formulierung unabhängig. Wir zeigen die Anwendung des vorgeschlagenen graphischen Modells anhand der mehrklassen Zuordnung von Bildregionen in Fassadenaufnahmen. Wir demonstrieren, dass das vorgeschlagene Verfahren zur Bildinterpretation, durch die Berücksichtigung räumlicher sowie hierarchischer Strukturen, signifikant bessere Klassifikationsergebnisse zeigt, als klassische lokale Klassifikationsverfahren. Die Leistungsfähigkeit des vorgeschlagenen Verfahrens wird anhand eines öffentlich verfügbarer Datensatzes evaluiert. Zur Klassifikation der Bildregionen nutzen wir ein Verfahren basierend auf einem effizienten Random Forest Klassifikator. 
Aus dem vorgeschlagenen allgemeinen graphischen Modell werden konkret zwei spezielle Modelle abgeleitet, ein hierarchisches bedingtes Zufallsfeld (hierarchical CRF) sowie ein hierarchisches gemischtes graphisches Modell. Wir zeigen, dass beide Modelle bessere Klassifikationsergebnisse erzeugen als die zugrunde liegenden lokalen Klassifikatoren oder die einfachen bedingten Zufallsfelder.

    @PhDThesis{yang2011hierarchical,
    author = {Yang, Michael Ying},
    title = {Hierarchical and Spatial Structures for Interpreting Images of Man-made Scenes Using Graphical Models},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2011},
    abstract = {Summary: The task of semantic scene interpretation is to label the regions of an image and their relations into meaningful classes. Such task is a key ingredient to many computer vision applications, including object recognition, 3D reconstruction and robotic perception. It is challenging partially due to the ambiguities inherent to the image data. The images of man-made scenes, e. g. the building facade images, exhibit strong contextual dependencies in the form of the spatial and hierarchical structures. Modelling these structures is central for such interpretation task. Graphical models provide a consistent framework for the statistical modelling. Bayesian networks and random fields are two popular types of the graphical models, which are frequently used for capturing such contextual information. The motivation for our work comes from the belief that we can find a generic formulation for scene interpretation that having both the benefits from random fields and Bayesian networks. It should have clear semantic interpretability. Therefore our key contribution is the development of a generic statistical graphical model for scene interpretation, which seamlessly integrates different types of the image features, and the spatial structural information and the hierarchical structural information defined over the multi-scale image segmentation. It unifies the ideas of existing approaches, e. g. conditional random field (CRF) and Bayesian network (BN), which has a clear statistical interpretation as the maximum a posteriori (MAP) estimate of a multi-class labelling problem. Given the graphical model structure, we derive the probability distribution of the model based on the factorization property implied in the model structure. The statistical model leads to an energy function that can be optimized approximately by either loopy belief propagation or graph cut based move making algorithm.
The particular type of the features, the spatial structure, and the hierarchical structure however is not prescribed. In the experiments, we concentrate on terrestrial man-made scenes as a specifically difficult problem. We demonstrate the application of the proposed graphical model on the task of multi-class classification of building facade image regions. The framework for scene interpretation allows for significantly better classification results than the standard classical local classification approach on man-made scenes by incorporating the spatial and hierarchical structures. We investigate the performance of the algorithms on a public dataset to show the relative importance of the information from the spatial structure and the hierarchical structure. As a baseline for the region classification, we use an efficient randomized decision forest classifier. Two specific models are derived from the proposed graphical model, namely the hierarchical CRF and the hierarchical mixed graphical model. We show that these two models produce better
    classification results than both the baseline region classifier and the flat CRF. Zusammenfassung: Ziel der semantischen Bildinterpretation ist es, Bildregionen und ihre gegenseitigen Beziehungen zu kennzeichnen und in sinnvolle Klassen einzuteilen. Dies ist eine der Hauptaufgabe in vielen Bereichen des maschinellen Sehens, wie zum Beispiel der Objekterkennung, 3D Rekonstruktion oder der Wahrnehmung von Robotern. Insbesondere Bilder anthropogener Szenen, wie z.B. Fassadenaufnahmen, sind durch starke r\"aumliche und hierarchische Strukturen gekennzeichnet. Diese Strukturen zu modellieren ist zentrale Teil der Interpretation, f\"ur deren statistische Modellierung graphische Modelle ein geeignetes konsistentes Werkzeug darstellen. Bayes Netze und Zufallsfelder sind zwei bekannte und h\"aufig genutzte Beispiele f\"ur graphische Modelle zur Erfassung kontextabh\"angiger Informationen. Die Motivation dieser Arbeit liegt in der \"Uberzeugung, dass wir eine generische Formulierung der Bildinterpretation mit klarer semantischer Bedeutung finden k\"onnen, die die Vorteile von Bayes Netzen und Zufallsfeldern verbindet. Der Hauptbeitrag der vorliegenden Arbeit liegt daher in der Entwicklung eines generischen statistischen graphischen Modells zur Bildinterpretation, welches unterschiedlichste Typen von Bildmerkmalen und die r\"aumlichen sowie hierarchischen Strukturinformationen \"uber eine multiskalen Bildsegmentierung integriert. Das Modell vereinheitlicht die existierender Arbeiten zugrunde liegenden Ideen, wie bedingter Zufallsfelder (conditional random field (CRF)) und Bayesnetze (Bayesian network (BN)). Dieses Modell hat eine klare statistische Interpretation als Maximum a posteriori (MAP) Sch\"atzer eines mehr klassen Zuordnungsproblems. Gegeben die Struktur des graphischen Modells und den dadurch definierten Faktorisierungseigenschaften leiten wir die Wahrscheinlichkeitsverteilung des Modells ab.
Dies f\"uhrt zu einer Energiefunktion, die n\"aherungsweise optimiert werden kann. Der jeweilige Typ der Bildmerkmale, die r\"aumliche sowie hierarchische Struktur ist von dieser Formulierung unabh\"angig. Wir zeigen die Anwendung des vorgeschlagenen graphischen Modells anhand der mehrklassen Zuordnung von Bildregionen in Fassadenaufnahmen. Wir demonstrieren, dass das vorgeschlagene Verfahren zur Bildinterpretation, durch die Ber\"ucksichtigung r\"aumlicher sowie hierarchischer Strukturen, signifikant bessere Klassifikationsergebnisse zeigt, als klassische lokale Klassifikationsverfahren. Die Leistungsf\"ahigkeit des vorgeschlagenen Verfahrens wird anhand eines \"offentlich verf\"ugbarer Datensatzes evaluiert. Zur Klassifikation der Bildregionen nutzen wir ein Verfahren basierend auf einem effizienten Random Forest Klassifikator. Aus dem vorgeschlagenen allgemeinen graphischen Modell werden konkret zwei spezielle Modelle abgeleitet, ein hierarchisches bedingtes Zufallsfeld (hierarchical CRF) sowie ein hierarchisches gemischtes
    graphisches Modell. Wir zeigen, dass beide Modelle bessere Klassifikationsergebnisse erzeugen als die zugrunde liegenden lokalen Klassifikatoren oder die einfachen bedingten Zufallsfelder.},
    internal-note = {Cleaned 2024: removed \textbf markup from abstract (styles supply formatting); fixed "ofthe" typo; author normalised to comma form},
    url = {https://hss.ulb.uni-bonn.de/2012/2765/2765.htm},
    }

  • M. Y. Yang and W. Förstner, “Feature Evaluation for Building Facade Images – An Empirical Study,” International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences, vol. XXXIX-B3, p. 513–518, 2011. doi:10.5194/isprsarchives-XXXIX-B3-513-2012
    [BibTeX] [PDF]

    The classification of building facade images is a challenging problem that receives a great deal of attention in the photogrammetry community. Image classification is critically dependent on the features. In this paper, we perform an empirical feature evaluation task for building facade images. Feature sets we choose are basic features, color features, histogram features, Peucker features, texture features, and SIFT features. We present an approach for region-wise labeling using an efficient randomized decision forest classifier and local features. We conduct our experiments with building facade image classification on the eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.

    @Article{yang2011feature,
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    title = {Feature Evaluation for Building Facade Images -- An Empirical Study},
    journal = {International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
    year = {2011},
    volume = {XXXIX-B3},
    pages = {513--518},
    abstract = {The classification of building facade images is a challenging problem that receives a great deal of attention in the photogrammetry community. Image classification is critically dependent on the features. In this paper, we perform an empirical feature evaluation task for building facade images. Feature sets we choose are basic features, color features, histogram features, Peucker features, texture features, and SIFT features. We present an approach for region-wise labeling using an efficient randomized decision forest classifier and local features. We conduct our experiments with building facade image classification on the eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.},
    doi = {10.5194/isprsarchives-XXXIX-B3-513-2012},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Feature.pdf},
    internal-note = {NOTE(review): DOI and volume XXXIX-B3 point to the 2012 ISPRS Archives; year = 2011 looks inconsistent -- confirm before changing},
    }

  • M. Y. Yang and W. Förstner, “A Hierarchical Conditional Random Field Model for Labeling and Classifying Images of Man-made Scenes,” in International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment, 2011. doi:10.1109/ICCVW.2011.6130243
    [BibTeX] [PDF]

    Semantic scene interpretation as a collection of meaningful regions in images is a fundamental problem in both photogrammetry and computer vision. Images of man-made scenes exhibit strong contextual dependencies in the form of spatial and hierarchical structures. In this paper, we introduce a hierarchical conditional random field to deal with the problem of image classification by modeling spatial and hierarchical structures. The probability outputs of an efficient randomized decision forest classifier are used as unary potentials. The spatial and hierarchical structures of the regions are integrated into pairwise potentials. The model is built on multi-scale image analysis in order to aggregate evidence from local to global level. Experimental results are provided to demonstrate the performance of the proposed method using images from eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.

    @InProceedings{yang2011hierarchicala,
    author    = {Yang, Michael Ying and F\"orstner, Wolfgang},
    title     = {A Hierarchical Conditional Random Field Model for Labeling and Classifying Images of Man-made Scenes},
    booktitle = {International Conf. on Computer Vision, IEEE/ISPRS Workshop on Computer Vision for Remote Sensing of the Environment},
    year      = {2011},
    doi       = {10.1109/ICCVW.2011.6130243},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Hierarchical.pdf},
    abstract  = {Semantic scene interpretation as a collection of meaningful regions in images is a fundamental problem in both photogrammetry and computer vision. Images of man-made scenes exhibit strong contextual dependencies in the form of spatial and hierarchical structures. In this paper, we introduce a hierarchical conditional random field to deal with the problem of image classification by modeling spatial and hierarchical structures. The probability outputs of an efficient randomized decision forest classifier are used as unary potentials. The spatial and hierarchical structures of the regions are integrated into pairwise potentials. The model is built on multi-scale image analysis in order to aggregate evidence from local to global level. Experimental results are provided to demonstrate the performance of the proposed method using images from eTRIMS dataset, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.},
    }

  • M. Y. Yang and W. Förstner, “Regionwise Classification of Building Facade Images,” in Photogrammetric Image Analysis (PIA2011), 2011, p. 209 – 220. doi:10.1007/978-3-642-24393-6_18
    [BibTeX] [PDF]

    In recent years, the classification task of building facade images receives a great deal of attention in the photogrammetry community. In this paper, we present an approach for regionwise classification using an efficient randomized decision forest classifier and local features. A conditional random field is then introduced to enforce spatial consistency between neighboring regions. Experimental results are provided to illustrate the performance of the proposed methods using image from eTRIMS database, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.

    @InProceedings{yang2011regionwise,
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    title = {Regionwise Classification of Building Facade Images},
    booktitle = {Photogrammetric Image Analysis (PIA2011)},
    year = {2011},
    editor = {Stilla, Uwe and Rottensteiner, Franz and Mayer, H. and Jutzi, Boris and Butenuth, Matthias},
    address = {Munich},
    pages = {209--220},
    publisher = {Springer},
    series = {Lecture Notes in Computer Science},
    volume = {6952},
    abstract = {In recent years, the classification task of building facade images receives a great deal of attention in the photogrammetry community. In this paper, we present an approach for regionwise classification using an efficient randomized decision forest classifier and local features. A conditional random field is then introduced to enforce spatial consistency between neighboring regions. Experimental results are provided to illustrate the performance of the proposed methods using image from eTRIMS database, where our focus is the object classes building, car, door, pavement, road, sky, vegetation, and window.},
    doi = {10.1007/978-3-642-24393-6_18},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Regionwise.pdf},
    internal-note = {Cleaned 2024: editors and venue moved from note into editor/address; series/volume split from "LNCS 6952"},
    }

  • J. Ziegler, H. Kretzschmar, C. Stachniss, G. Grisetti, and W. Burgard, “Accurate Human Motion Capture in Large Areas by Combining IMU- and Laser-based People Tracking,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Francisco, CA, USA, 2011.
    [BibTeX] [PDF]
    [none]
    @InProceedings{ziegler2011,
    author    = {J. Ziegler and H. Kretzschmar and C. Stachniss and G. Grisetti and W. Burgard},
    title     = {Accurate Human Motion Capture in Large Areas by Combining IMU- and Laser-based People Tracking},
    booktitle = iros,
    year      = {2011},
    address   = {San Francisco, CA, USA},
    abstract  = {[none]},
    url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/ziegler11iros.pdf},
    timestamp = {2014.04.24},
    }

2010

  • M. Albrecht, “Erkennung bewegter Objekte auf fluktuierendem Hintergrund in Bildfolgen,” Master Thesis, 2010.
    [BibTeX] [PDF]
    [none]
    @MastersThesis{albrecht2010erkennung,
    author = {Albrecht, Markus},
    title = {Erkennung bewegter Objekte auf fluktuierendem Hintergrund in Bildfolgen},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2010},
    address = {Bonn},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Ribana Roscher},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/pdfs/Albrecht2010Erkennung.pdf},
    }

  • A. Barth and U. Franke, “Tracking Oncoming and Turning Vehicles at Intersections,” in Intelligent Transportation Systems, IEEE Conf. on, Madeira Island, Portugal, 2010, p. 861–868. doi:10.1109/ITSC.2010.5624969
    [BibTeX] [PDF]

    This article addresses the reliable tracking of oncoming traffic at urban intersections from a moving platform with a stereo vision system. Both motion and depth information is combined to estimate the pose and motion parameters of an oncoming vehicle, including the yaw rate, by means of Kalman filtering. Vehicle tracking at intersections is particularly challenging since vehicles can turn quickly. A single filter approach cannot cover the dynamic range of a vehicle sufficiently. We propose a real-time multi-filter approach for vehicle tracking at intersections. A gauge consistency criteria as well as a robust outlier detection method allow for dealing with sudden accelerations and self-occlusions during turn maneuvers. The system is evaluated both on synthetic and real-world data.

    @InProceedings{barth2010tracking,
    author = {Barth, Alexander and Franke, Uwe},
    title = {Tracking Oncoming and Turning Vehicles at Intersections},
    booktitle = {Intelligent Transportation Systems, IEEE Conf. on},
    year = {2010},
    address = {Madeira Island, Portugal},
    pages = {861--868},
    abstract = {This article addresses the reliable tracking of oncoming traffic at urban intersections from a moving platform with a stereo vision system. Both motion and depth information is combined to estimate the pose and motion parameters of an oncoming vehicle, including the yaw rate, by means of Kalman filtering. Vehicle tracking at intersections is particularly challenging since vehicles can turn quickly. A single filter approach cannot cover the dynamic range of a vehicle sufficiently. We propose a real-time multi-filter approach for vehicle tracking at intersections. A gauge consistency criteria as well as a robust outlier detection method allow for dealing with sudden accelerations and self-occlusions during turn maneuvers. The system is evaluated both on synthetic and real-world data.},
    doi = {10.1109/ITSC.2010.5624969},
    url = {https://www.ipb.uni-bonn.de/pdfs/Barth2010Tracking.pdf},
    }

  • A. Barth, J. Siegemund, A. Meißner, U. Franke, and W. Förstner, “Probabilistic Multi-Class Scene Flow Segmentation for Traffic Scenes,” in Pattern Recognition (Symposium of DAGM), 2010, p. 503–512. doi:10.1007/978-3-642-15986-2_51
    [BibTeX] [PDF]

    A multi-class traffic scene segmentation approach based on scene flow data is presented. Opposed to many other approaches using color or texture features, our approach is purely based on dense depth and 3D motion information. Using prior knowledge on tracked objects in the scene and the pixel-wise uncertainties of the scene flow data, each pixel is assigned to either a particular moving object class (tracked/unknown object), the ground surface, or static background. The global topological order of classes, such as objects are above ground, is locally integrated into a conditional random field by an ordering constraint. The proposed method yields very accurate segmentation results on challenging real world scenes, which we made publicly available for comparison.

    @InProceedings{barth2010probabilistic,
    author = {Barth, Alexander and Siegemund, Jan and Mei{\ss}ner, Annemarie and Franke, Uwe and F\"orstner, Wolfgang},
    title = {Probabilistic Multi-Class Scene Flow Segmentation for Traffic Scenes},
    booktitle = {Pattern Recognition (Symposium of DAGM)},
    year = {2010},
    editor = {Goesele, M. and Roth, S. and Kuijper, A. and Schiele, B. and Schindler, K.},
    address = {Darmstadt},
    pages = {503--512},
    publisher = {Springer},
    abstract = {A multi-class traffic scene segmentation approach based on scene flow data is presented. Opposed to many other approaches using color or texture features, our approach is purely based on dense depth and 3D motion information. Using prior knowledge on tracked objects in the scene and the pixel-wise uncertainties of the scene flow data, each pixel is assigned to either a particular moving object class (tracked/unknown object), the ground surface, or static background. The global topological order of classes, such as objects are above ground, is locally integrated into a conditional random field by an ordering constraint. The proposed method yields very accurate segmentation results on challenging real world scenes, which we made publicly available for comparison.},
    doi = {10.1007/978-3-642-15986-2_51},
    url = {https://www.ipb.uni-bonn.de/pdfs/Barth2010Probabilistic.pdf},
    }

  • W. Burgard, K. M. Wurm, M. Bennewitz, C. Stachniss, A. Hornung, R. B. Rusu, and K. Konolige, “Modeling the World Around Us: An Efficient 3D Representation for Personal Robotics,” in Workshop on Defining and Solving Realistic Perception Problems in Personal Robotics at the IEEE/RSJ Int.Conf.on Intelligent Robots and Systems, Taipei, Taiwan, 2010.
    [BibTeX]
    [none]
    @InProceedings{burgard2010,
    author    = {Burgard, W. and Wurm, K.M. and Bennewitz, M. and Stachniss, C. and Hornung, A. and Rusu, R.B. and Konolige, K.},
    title     = {Modeling the World Around Us: An Efficient 3D Representation for Personal Robotics},
    booktitle = {Workshop on Defining and Solving Realistic Perception Problems in Personal Robotics at the IEEE/RSJ Int.Conf.on Intelligent Robots and Systems},
    year      = {2010},
    address   = {Taipei, Taiwan},
    abstract  = {[none]},
    timestamp = {2014.04.24},
    }

  • T. Castaings, B. Waske, J. A. Benediktsson, and J. Chanussot, “On the influence of feature reduction for the classification of hyperspectral images based on the extended morphological profile,” International Journal of Remote Sensing, vol. 31, iss. 22, p. 5975–5991, 2010. doi:10.1080/01431161.2010.512313
    [BibTeX]

    In this study we investigated the classification of hyperspectral data with high spatial resolution. Previously, methods that generate a so-called extended morphological profile (EMP) from the principal components of an image have been proposed to create base images for morphological transformations. However, it can be assumed that the feature reduction (FR) may have a significant effect on the accuracy of the classification of the EMP. We therefore investigated the effect of different FR methods on the generation and classification of the EMP of hyperspectral images from urban areas, using a machine learning-based algorithm for classification. The applied FR methods include: principal component analysis (PCA), nonparametric weighted feature extraction (NWFE), decision boundary feature extraction (DBFE), Gaussian kernel PCA (KPCA) and Bhattacharyya distance feature selection (BDFS). Experiments were run with two classification algorithms: the support vector machine (SVM) and random forest (RF) algorithms. We demonstrate that the commonly used PCA approach seems to be nonoptimal in a large number of cases in terms of classification accuracy, and the other FR methods may be more suitable as preprocessing approaches for the EMP.

    @Article{castaings2010influence,
    author = {Castaings, Thibaut and Waske, Bj\"orn and Benediktsson, Jon Atli and Chanussot, Jocelyn},
    title = {On the influence of feature reduction for the classification of hyperspectral images based on the extended morphological profile},
    journal = {International Journal of Remote Sensing},
    year = {2010},
    volume = {31},
    number = {22},
    pages = {5975--5991},
    issn = {0143-1161},
    abstract = {In this study we investigated the classification of hyperspectral data with high spatial resolution. Previously, methods that generate a so-called extended morphological profile (EMP) from the principal components of an image have been proposed to create base images for morphological transformations. However, it can be assumed that the feature reduction (FR) may have a significant effect on the accuracy of the classification of the EMP. We therefore investigated the effect of different FR methods on the generation and classification of the EMP of hyperspectral images from urban areas, using a machine learning-based algorithm for classification. The applied FR methods include: principal component analysis (PCA), nonparametric weighted feature extraction (NWFE), decision boundary feature extraction (DBFE), Gaussian kernel PCA (KPCA) and Bhattacharyya distance feature selection (BDFS). Experiments were run with two classification algorithms: the support vector machine (SVM) and random forest (RF) algorithms. We demonstrate that the commonly used PCA approach seems to be nonoptimal in a large number of cases in terms of classification accuracy, and the other FR methods may be more suitable as preprocessing approaches for the EMP.},
    doi = {10.1080/01431161.2010.512313},
    internal-note = {NOTE(review): volume/issue/pages identical to dallamura2010extended -- verify page range against the DOI. Cleaned 2024: sn renamed to issn; Web-of-Science export counters (tc/ut/z8/z9/zb) removed},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • X. Ceamanos, B. Waske, J. A. Benediktsson, J. Chanussot, M. Fauvel, and J. R. Sveinsson, “A classifier ensemble based on fusion of support vector machines for classifying hyperspectral data,” International Journal of Image and Data Fusion, vol. 1, iss. 4, p. 293–307, 2010. doi:10.1080/19479832.2010.485935
    [BibTeX]

    Classification of hyperspectral data using a classifier ensemble that is based on support vector machines (SVMs) are addressed. First, the hyperspectral data set is decomposed into a few data sources according to the similarity of the spectral bands. Then, each source is processed separately by performing classification based on SVM. Finally, all outputs are used as input for final decision fusion performed by an additional SVM classifier. Results of the experiments underline how the proposed SVM fusion ensemble outperforms a standard SVM classifier in terms of overall and class accuracies, the improvement being irrespective of the size of the training sample set. The definition of the data sources resulting from the original data set is also studied.

    @Article{ceamanos2010classifier,
    author   = {Ceamanos, Xavier and Waske, Bj\"orn and Benediktsson, Jon Atli and Chanussot, Jocelyn and Fauvel, Mathieu and Sveinsson, Johannes R.},
    title    = {A classifier ensemble based on fusion of support vector machines for classifying hyperspectral data},
    journal  = {International Journal of Image and Data Fusion},
    year     = {2010},
    volume   = {1},
    number   = {4},
    pages    = {293--307},
    doi      = {10.1080/19479832.2010.485935},
    abstract = {Classification of hyperspectral data using a classifier ensemble that is based on support vector machines (SVMs) are addressed. First, the hyperspectral data set is decomposed into a few data sources according to the similarity of the spectral bands. Then, each source is processed separately by performing classification based on SVM. Finally, all outputs are used as input for final decision fusion performed by an additional SVM classifier. Results of the experiments underline how the proposed SVM fusion ensemble outperforms a standard SVM classifier in terms of overall and class accuracies, the improvement being irrespective of the size of the training sample set. The definition of the data sources resulting from the original data set is also studied.},
    }

  • M. Dalla Mura, J. A. Benediktsson, B. Waske, and L. Bruzzone, “Extended profiles with morphological attribute filters for the analysis of hyperspectral data,” International Journal of Remote Sensing, vol. 31, iss. 22, p. 5975–5991, 2010. doi:10.1080/01431161.2010.512425
    [BibTeX]

    Extended attribute profiles and extended multi-attribute profiles are presented for the analysis of hyperspectral high-resolution images. These extended profiles are based on morphological attribute filters and, through a multi-level analysis, are capable of extracting spatial features that can better model the spatial information, with respect to conventional extended morphological profiles. The features extracted by the proposed extended profiles were considered for a classification task. Two hyperspectral high-resolution datasets acquired for the city of Pavia, Italy, were considered in the analysis. The effectiveness of the introduced operators in modelling the spatial information was proved by the higher classification accuracies obtained with respect to those achieved by a conventional extended morphological profile.

    @Article{dallamura2010extended,
    author = {Dalla Mura, Mauro and Benediktsson, Jon Atli and Waske, Bj\"orn and Bruzzone, Lorenzo},
    title = {Extended profiles with morphological attribute filters for the analysis of hyperspectral data},
    journal = {International Journal of Remote Sensing},
    year = {2010},
    volume = {31},
    number = {22},
    pages = {5975--5991},
    issn = {0143-1161},
    abstract = {Extended attribute profiles and extended multi-attribute profiles are presented for the analysis of hyperspectral high-resolution images. These extended profiles are based on morphological attribute filters and, through a multi-level analysis, are capable of extracting spatial features that can better model the spatial information, with respect to conventional extended morphological profiles. The features extracted by the proposed extended profiles were considered for a classification task. Two hyperspectral high-resolution datasets acquired for the city of Pavia, Italy, were considered in the analysis. The effectiveness of the introduced operators in modelling the spatial information was proved by the higher classification accuracies obtained with respect to those achieved by a conventional extended morphological profile.},
    doi = {10.1080/01431161.2010.512425},
    internal-note = {Cleaned 2024: sn renamed to issn; Web-of-Science export counters (tc/ut/z8/z9/zb) removed},
    owner = {waske},
    timestamp = {2012.09.04},
    }

  • M. Dalla Mura, J. A. Benediktsson, B. Waske, and L. Bruzzone, “Morphological Attribute Profiles for the Analysis of Very High Resolution Images,” IEEE Transactions on Geoscience and Remote Sensing, vol. 48, iss. 10, pp. 3747–3762, 2010. doi:10.1109/TGRS.2010.2048116
    [BibTeX]

    Morphological attribute profiles (APs) are defined as a generalization of the recently proposed morphological profiles (MPs). APs provide a multilevel characterization of an image created by the sequential application of morphological attribute filters that can be used to model different kinds of the structural information. According to the type of the attributes considered in the morphological attribute transformation, different parametric features can be modeled. The generation of APs, thanks to an efficient implementation, strongly reduces the computational load required for the computation of conventional MPs. Moreover, the characterization of the image with different attributes leads to a more complete description of the scene and to a more accurate modeling of the spatial information than with the use of conventional morphological filters based on a predefined structuring element. Here, the features extracted by the proposed operators were used for the classification of two very high resolution panchromatic images acquired by Quickbird on the city of Trento, Italy. The experimental analysis proved the usefulness of APs in modeling the spatial information present in the images. The classification maps obtained by considering different APs result in a better description of the scene (both in terms of thematic and geometric accuracy) than those obtained with an MP.

    @article{dallamura2010morphological,
    title = {Morphological Attribute Profiles for the Analysis of Very High Resolution Images},
    author = {Dalla Mura, Mauro and Benediktsson, Jon Atli and Waske, Bj\"orn and Bruzzone, Lorenzo},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2010},
    month = oct,
    volume = {48},
    number = {10},
    pages = {3747--3762},
    abstract = {Morphological attribute profiles (APs) are defined as a generalization of the recently proposed morphological profiles (MPs). APs provide a multilevel characterization of an image created by the sequential application of morphological attribute filters that can be used to model different kinds of the structural information. According to the type of the attributes considered in the morphological attribute transformation, different parametric features can be modeled. The generation of APs, thanks to an efficient implementation, strongly reduces the computational load required for the computation of conventional MPs. Moreover, the characterization of the image with different attributes leads to a more complete description of the scene and to a more accurate modeling of the spatial information than with the use of conventional morphological filters based on a predefined structuring element. Here, the features extracted by the proposed operators were used for the classification of two very high resolution panchromatic images acquired by Quickbird on the city of Trento, Italy. The experimental analysis proved the usefulness of APs in modeling the spatial information present in the images. The classification maps obtained by considering different APs result in a better description of the scene (both in terms of thematic and geometric accuracy) than those obtained with an MP.},
    doi = {10.1109/TGRS.2010.2048116},
    issn = {0196-2892},
    owner = {waske},
    timestamp = {2012.09.04},
    ut = {WOS:000283349400014},
    }

  • W. Förstner, “Minimal Representations for Uncertainty and Estimation in Projective Spaces,” in Proc. of Asian Conf. on Computer Vision, 2010, pp. 619–633, Part II. doi:10.1007/978-3-642-19309-5_48
    [BibTeX] [PDF]

    Estimation using homogeneous entities has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations which do not allow an immediate definition of maximum likelihood estimation and lead to estimation problems with more parameters than necessary. The paper proposes a representation of the uncertainty of all types of geometric entities and estimation procedures for geometric entities and transformations which (1) only require the minimum number of parameters, (2) are free of singularities, (3) allow for a consistent update within an iterative procedure, (4) enable to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (5) allow to handle geometric entities which are at in nity or at least very far, avoiding the usage of concepts like the inverse depth. Such representations are already available for transformations such as rotations, motions (Rosenhahn 2002), homographies (Begelfor 2005), or the projective correlation with fundamental matrix (Bartoli 2004) all being elements of some Lie group. The uncertainty is represented in the tangent space of the manifold, namely the corresponding Lie algebra. However, to our knowledge no such representations are developed for the basic geometric entities such as points, lines and planes, as in addition to use the tangent space of the manifolds we need transformation of the entities such that they stay on their specific manifold during the estimation process. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate (a) its superiority compared to more simple methods for vanishing point estimation, (b) its rigour when estimating 3D lines from 3D points and (c) its applicability for determining 3D lines from observed image line segments in a multi view setup.

    @inproceedings{forstner2010minimal,
    author = {F\"orstner, Wolfgang},
    title = {Minimal Representations for Uncertainty and Estimation in Projective Spaces},
    booktitle = {Proc. of Asian Conf. on Computer Vision},
    year = {2010},
    pages = {619--633, Part II},
    note = {Queenstown, New Zealand},
    abstract = {Estimation using homogeneous entities has to cope with obstacles such as singularities of covariance matrices and redundant parametrizations which do not allow an immediate definition of maximum likelihood estimation and lead to estimation problems with more parameters than necessary. The paper proposes a representation of the uncertainty of all types of geometric entities and estimation procedures for geometric entities and transformations which (1) only require the minimum number of parameters, (2) are free of singularities, (3) allow for a consistent update within an iterative procedure, (4) enable to exploit the simplicity of homogeneous coordinates to represent geometric constraints and (5) allow to handle geometric entities which are at in nity or at least very far, avoiding the usage of concepts like the inverse depth. Such representations are already available for transformations such as rotations, motions (Rosenhahn 2002), homographies (Begelfor 2005), or the projective correlation with fundamental matrix (Bartoli 2004) all being elements of some Lie group. The uncertainty is represented in the tangent space of the manifold, namely the corresponding Lie algebra. However, to our knowledge no such representations are developed for the basic geometric entities such as points, lines and planes, as in addition to use the tangent space of the manifolds we need transformation of the entities such that they stay on their specific manifold during the estimation process. We develop the concept, discuss its usefulness for bundle adjustment and demonstrate (a) its superiority compared to more simple methods for vanishing point estimation, (b) its rigour when estimating 3D lines from 3D points and (c) its applicability for determining 3D lines from observed image line segments in a multi view setup.},
    doi = {10.1007/978-3-642-19309-5_48},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2010Minimal.pdf},
    }

  • W. Förstner, “Optimal Vanishing Point Detection and Rotation Estimation of Single Images of a Legolandscene,” in Int. Archives of Photogrammetry and Remote Sensing, 2010, pp. 157–163, Part A.
    [BibTeX] [PDF]

    The paper presents a method for automatically and optimally determining the vanishing points of a single image, and in case the interior orientation is given, the rotation of an image with respect to the intrinsic coordinate system of a lego land scene. We perform rigorous testing and estimation in order to be as independent on control parameters as possible. This refers to (1) estimating vanishing points from line segments and the rotation matrix, (2) to testing during RANSAC and during boosting lines and (3) to classifying the line segments w. r. t. their vanishing point. Spherically normalized homogeneous coordinates are used for line segments and especially for vanishing points to allow for points at infinity. We propose a minimal representation for the uncertainty of homogeneous coordinates of 2D points and 2D lines and rotations to avoid the use of singular covariance matrices of observed line segments. This at the same time allows to estimate the parameters with a minimal representation. The vanishing point detection method is experimentally validated on a set of 292 images.

    @inproceedings{forstner2010optimal,
    author = {F\"orstner, Wolfgang},
    title = {Optimal Vanishing Point Detection and Rotation Estimation of Single Images of a Legolandscene},
    booktitle = {Int. Archives of Photogrammetry and Remote Sensing},
    year = {2010},
    organization = {ISPRS Symposium Comm. III, Paris},
    pages = {157--163, Part A.},
    abstract = {The paper presents a method for automatically and optimally determining the vanishing points of a single image, and in case the interior orientation is given, the rotation of an image with respect to the intrinsic coordinate system of a lego land scene. We perform rigorous testing and estimation in order to be as independent on control parameters as possible. This refers to (1) estimating vanishing points from line segments and the rotation matrix, (2) to testing during RANSAC and during boosting lines and (3) to classifying the line segments w. r. t. their vanishing point. Spherically normalized homogeneous coordinates are used for line segments and especially for vanishing points to allow for points at infinity. We propose a minimal representation for the uncertainty of homogeneous coordinates of 2D points and 2D lines and rotations to avoid the use of singular covariance matrices of observed line segments. This at the same time allows to estimate the parameters with a minimal representation. The vanishing point detection method is experimentally validated on a set of 292 images.},
    owner = {wf},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2010Optimal.pdf},
    }

  • B. Frank, R. Schmedding, C. Stachniss, M. Teschner, and W. Burgard, “Learning Deformable Object Models for Mobile Robot Path Planning using Depth Cameras and a Manipulation Robot,” in Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS), Zaragoza, Spain, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{frank2010,
    author = {B. Frank and R. Schmedding and C. Stachniss and M. Teschner and W. Burgard},
    title = {Learning Deformable Object Models for Mobile Robot Path Planning using Depth Cameras and a Manipulation Robot},
    booktitle = {Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS)},
    address = {Zaragoza, Spain},
    year = {2010},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank10rssws.pdf},
    timestamp = {2014.04.24},
    }

  • B. Frank, R. Schmedding, C. Stachniss, M. Teschner, and W. Burgard, “Learning the Elasticity Parameters of Deformable Objects with a Manipulation Robot,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Taipei, Taiwan, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{frank2010a,
    author = {B. Frank and R. Schmedding and C. Stachniss and M. Teschner and W. Burgard},
    title = {Learning the Elasticity Parameters of Deformable Objects with a Manipulation Robot},
    booktitle = iros,
    address = {Taipei, Taiwan},
    year = {2010},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank10iros.pdf},
    timestamp = {2014.04.24},
    }

  • G. Grisetti, R. Kümmerle, C. Stachniss, and W. Burgard, “A Tutorial on Graph-based SLAM,” IEEE Transactions on Intelligent Transportation Systems Magazine, vol. 2, pp. 31–43, 2010.
    [BibTeX] [PDF]
    [none]
    @article{grisetti2010a,
    title = {A Tutorial on Graph-based {SLAM}},
    author = {G. Grisetti and R. K{\"u}mmerle and C. Stachniss and W. Burgard},
    journal = {IEEE Transactions on Intelligent Transportation Systems Magazine},
    year = {2010},
    volume = {2},
    number = {4},
    pages = {31--43},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti10titsmag.pdf},
    }

  • G. Grisetti, R. Kümmerle, C. Stachniss, U. Frese, and C. Hertzberg, “Hierarchical Optimization on Manifolds for Online 2D and 3D Mapping,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Anchorage, Alaska, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{grisetti2010,
    author = {G. Grisetti and R. K{\"u}mmerle and C. Stachniss and U. Frese and C. Hertzberg},
    title = {Hierarchical Optimization on Manifolds for Online 2D and 3D Mapping},
    booktitle = icra,
    address = {Anchorage, Alaska},
    year = {2010},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti10icra.pdf},
    timestamp = {2014.04.24},
    }

  • A. Hüsgen, “Multi-Modal Segmentation of Anatomical and Functional Image of the Brain,” Diploma Thesis, University of Bonn, 2010.
    [BibTeX] [PDF]
    [none]
    @mastersthesis{husgen2010multi,
    author = {H\"usgen, Andreas},
    title = {Multi-Modal Segmentation of Anatomical and Functional Image of the Brain},
    school = {University of Bonn},
    type = {Diploma Thesis},
    year = {2010},
    note = {Betreuung: Prof. Dr.-Ing. W. F\"orstner, Privatdozent Dr. Volker Steinhage},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/pdfs/Husgen2010Multi.pdf},
    }

  • A. Hecheltjen, B. Waske, F. Thonfeld, M. Braun, and G. Menz, “Support Vector Machines for Multitemporal and Multisensor Change Detection,” in ESA’s Living Planet Symposium (ESA SP-686), 2010.
    [BibTeX]
    [none]
    @inproceedings{hecheltjen2010support,
    author = {Hecheltjen, Antje and Waske, Bj\"orn and Thonfeld, Frank and Braun, Matthias and Menz, Gunter},
    title = {Support Vector Machines for Multitemporal and Multisensor Change Detection},
    booktitle = {ESA's Living Planet Symposium (ESA SP-686)},
    year = {2010},
    abstract = {[none]},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • A. Hornung, M. Bennewitz, C. Stachniss, H. Strasdat, S. Oßwald, and W. Burgard, “Learning Adaptive Navigation Strategies for Resource-Constrained Systems,” in Proc. of the Int. Workshop on Evolutionary and Reinforcement Learning for Autonomous Robot Systems, Lisbon, Portugal, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{hornung2010,
    title = {Learning Adaptive Navigation Strategies for Resource-Constrained Systems},
    author = {A. Hornung and M. Bennewitz and C. Stachniss and H. Strasdat and S. O{\ss}wald and W. Burgard},
    booktitle = {Proc. of the Int. Workshop on Evolutionary and Reinforcement Learning for Autonomous Robot Systems},
    year = {2010},
    address = {Lisbon, Portugal},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/hornung10erlars.pdf},
    }

  • M. Karg, K. M. Wurm, C. Stachniss, K. Dietmayer, and W. Burgard, “Consistent Mapping of Multistory Buildings by Introducing Global Constraints to Graph-based SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Anchorage, Alaska, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{karg2010,
    author = {M. Karg and K.M. Wurm and C. Stachniss and K. Dietmayer and W. Burgard},
    title = {Consistent Mapping of Multistory Buildings by Introducing Global Constraints to Graph-based {SLAM}},
    booktitle = icra,
    address = {Anchorage, Alaska},
    year = {2010},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/karg10icra.pdf},
    timestamp = {2014.04.24},
    }

  • F. Korč, D. Schneider, and W. Förstner, “On Nonparametric Markov Random Field Estimation for Fast Automatic Segmentation of MRI Knee Data,” in Proc. of the 4th Medical Image Analysis for the Clinic – A Grand Challenge workshop, MICCAI, 2010, pp. 261–270.
    [BibTeX] [PDF]

    We present a fast automatic reproducible method for 3d semantic segmentation of magnetic resonance images of the knee. We formulate a single global model that allows to jointly segment all classes. The model estimation was performed automatically without manual interaction and parameter tuning. The segmentation of a magnetic resonance image with 11 Mio voxels took approximately one minute. Our labeling results by far do not reach the performance of complex state of the art approaches designed to produce clinically relevant results. Our results could potentially be useful for rough visualization or initialization of computationally demanding methods. Our main contribution is to provide insights in possible strategies when employing global statistical models

    @inproceedings{korvc2010nonparametric,
    title = {On Nonparametric Markov Random Field Estimation for Fast Automatic Segmentation of MRI Knee Data},
    author = {Kor{\v{c}}, Filip and Schneider, David and F\"orstner, Wolfgang},
    booktitle = {Proc. of the 4th Medical Image Analysis for the Clinic - A Grand Challenge workshop, MICCAI},
    year = {2010},
    note = {Beijing},
    pages = {261--270},
    abstract = {We present a fast automatic reproducible method for 3d semantic segmentation of magnetic resonance images of the knee. We formulate a single global model that allows to jointly segment all classes. The model estimation was performed automatically without manual interaction and parameter tuning. The segmentation of a magnetic resonance image with 11 Mio voxels took approximately one minute. Our labeling results by far do not reach the performance of complex state of the art approaches designed to produce clinically relevant results. Our results could potentially be useful for rough visualization or initialization of computationally demanding methods. Our main contribution is to provide insights in possible strategies when employing global statistical models},
    url = {https://www.ipb.uni-bonn.de/pdfs/Korvc2010Nonparametric.pdf},
    }

  • H. Kretzschmar, G. Grisetti, and C. Stachniss, “Lifelong Map Learning for Graph-based SLAM in Static Environments,” KI – Künstliche Intelligenz, vol. 24, p. 199–206, 2010.
    [BibTeX]
    [none]
    @article{kretzschmar2010,
    title = {Lifelong Map Learning for Graph-based {SLAM} in Static Environments},
    author = {H. Kretzschmar and G. Grisetti and C. Stachniss},
    journal = {{KI} -- {K}\"unstliche {I}ntelligenz},
    year = {2010},
    volume = {24},
    number = {3},
    pages = {199--206},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. Müller, C. Stachniss, K. O. Arras, and W. Burgard, “Socially Inspired Motion Planning for Mobile Robots in Populated Environments,” in Cognitive Systems, , 2010.
    [BibTeX]
    [none]
    @incollection{muller2010,
    title = {Socially Inspired Motion Planning for Mobile Robots in Populated Environments},
    author = {M{\"u}ller, J. and Stachniss, C. and Arras, K.O. and Burgard, W.},
    booktitle = {Cognitive Systems},
    publisher = springer,
    series = {Cognitive Systems Monographs},
    year = {2010},
    note = {In press},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • T. Mewes, B. Waske, J. Franke, and G. Menz, “Derivation of stress severities in wheat from hyperspectral data using support vector regression,” in 2nd Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS 2010), 2010. doi:10.1109/WHISPERS.2010.5594921
    [BibTeX]

    The benefits and limitations of crop stress detection by hyperspectral data analysis have been examined in detail. It could thereby be demonstrated that even a differentiation between healthy and fungal infected wheat stands is possible and profits by analyzing entire spectra or specifically selected spectral bands/ranges. For reasons of practicability in agriculture, spatial information about the health status of crop plants beyond a binary classification would be a major benefit. Thus, the potential of hyperspectral data for the derivation of several disease severity classes or moreover the derivation of continual disease severity has to be further examined. In the present study, a state-of-the-art regression approach using support vector machines (SVM) has been applied to hyperspectral AISA-Dual data to derive the disease severity caused by leaf rust (Puccinina recondita) in wheat. Ground truth disease ratings were realized within an experimental field. A mean correlation coefficient of r=0.69 between severities and support vector regression predicted severities could be achieved using indepent training and test data. The results show that the SVR is generally suitable for the derivation of continual disease severity values, but the crucial point is the uncertainty in the reference severity data, which is used to train the regression.

    @inproceedings{mewes2010derivation,
    author = {Mewes, T. and Waske, Bj\"orn and Franke, J. and Menz, G.},
    title = {Derivation of stress severities in wheat from hyperspectral data using support vector regression},
    booktitle = {2nd Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS 2010)},
    year = {2010},
    doi = {10.1109/WHISPERS.2010.5594921},
    abstract = {The benefits and limitations of crop stress detection by hyperspectral data analysis have been examined in detail. It could thereby be demonstrated that even a differentiation between healthy and fungal infected wheat stands is possible and profits by analyzing entire spectra or specifically selected spectral bands/ranges. For reasons of practicability in agriculture, spatial information about the health status of crop plants beyond a binary classification would be a major benefit. Thus, the potential of hyperspectral data for the derivation of several disease severity classes or moreover the derivation of continual disease severity has to be further examined. In the present study, a state-of-the-art regression approach using support vector machines (SVM) has been applied to hyperspectral AISA-Dual data to derive the disease severity caused by leaf rust (Puccinina recondita) in wheat. Ground truth disease ratings were realized within an experimental field. A mean correlation coefficient of r=0.69 between severities and support vector regression predicted severities could be achieved using indepent training and test data. The results show that the SVR is generally suitable for the derivation of continual disease severity values, but the crucial point is the uncertainty in the reference severity data, which is used to train the regression.},
    keywords = {AISA-Dual data;Puccinina recondita;agriculture;binary classification;crop stress detection;fungal infected wheat;hyperspectral data;leaf rust;stress severity derivation;support vector machine;support vector regression;agriculture;crops;geophysical techniques;regression analysis;support vector machines;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • M. Muffert, “Verwendung eines mosaikbasierten Kamerasystems zur Bestimmung von räumlichen Orientierungsänderungen von mobilen Objekten,” Master Thesis, 2010.
    [BibTeX] [PDF]

    The estimation of relative spatial positions and orientations is one of the most important tasks of engineering geodesy. For example, we need these parameters in precision farming or controlling the driving direction of construction vehicles. It is usual to use multi-sensor systems in these applications which are often a combination of GPS-sensors with Inertial Navigation Systems (INS). An optimal solution for the searched parameters could be achieved using filtering processes.

    @mastersthesis{muffert2010verwendung,
    title = {Verwendung eines mosaikbasierten Kamerasystems zur Bestimmung von r\"aumlichen Orientierungs\"anderungen von mobilen Objekten},
    author = {Muffert, Maximilian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2010},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr.-Ing. Heiner Kuhlmann},
    abstract = {The estimation of relative spatial positions and orientations is one of the most important tasks of engineering geodesy. For example, we need these parameters in precision farming or controlling the driving direction of construction vehicles. It is usual to use multi-sensor systems in these applications which are often a combination of GPS-sensors with Inertial Navigation Systems (INS). An optimal solution for the searched parameters could be achieved using filtering processes.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Muffert2010Verwendung.pdf},
    }

  • M. Muffert, J. Siegemund, and W. Förstner, “The estimation of spatial positions by using an omnidirectional camera system,” in 2nd International Conf. on Machine Control & Guidance, 2010, pp. 95–104.
    [BibTeX] [PDF]

    With an omnidirectional camera system, it is possible to take 360-degree views of the surrounding area at each camera position. These systems are used particularly in robotic applications, in autonomous navigation and supervision technology for ego-motion estimation. In addition to the visual capture of the environment itself, we can compute the parameters of orientation and position from image sequences, i.e. we get three parameters of position and three of orientation (yaw rate, pitch and roll angle) at each time of acquisition. The aim of the presented project is to investigate the quality of the spatial trajectory of a mobile survey vehicle from the recorded image sequences. In this paper, we explain the required photogrammetric background and show the advantages of omnidirectional camera systems for this task. We present the first results on our test set and discuss alternative applications for omnidirectional cameras.

    @inproceedings{muffert2010estimation,
    author = {Muffert, Maximilian and Siegemund, Jan and F\"orstner, Wolfgang},
    title = {The estimation of spatial positions by using an omnidirectional camera system},
    booktitle = {2nd International Conf. on Machine Control \& Guidance},
    year = {2010},
    month = mar,
    pages = {95--104},
    abstract = {With an omnidirectional camera system, it is possible to take 360-degree views of the surrounding area at each camera position. These systems are used particularly in robotic applications, in autonomous navigation and supervision technology for ego-motion estimation. In addition to the visual capture of the environment itself, we can compute the parameters of orientation and position from image sequences, i.e. we get three parameters of position and three of orientation (yaw rate, pitch and roll angle) at each time of acquisition. The aim of the presented project is to investigate the quality of the spatial trajectory of a mobile survey vehicle from the recorded image sequences. In this paper, we explain the required photogrammetric background and show the advantages of omnidirectional camera systems for this task. We present the first results on our test set and discuss alternative applications for omnidirectional cameras.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Muffert2010estimation.pdf},
    }

  • C. Plagemann, C. Stachniss, J. Hess, F. Endres, and N. Franklin, “A Nonparametric Learning Approach to Range Sensing from Omnidirectional Vision,” Journal on Robotics and Autonomous Systems (RAS), vol. 58, pp. 762–772, 2010.
    [BibTeX]
    [none]
    @article{plagemann2010,
    title = {A Nonparametric Learning Approach to Range Sensing from Omnidirectional Vision},
    author = {C. Plagemann and C. Stachniss and J. Hess and F. Endres and N. Franklin},
    journal = jras,
    year = {2010},
    volume = {58},
    number = {6},
    pages = {762--772},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • M. Röder-Sorge, “Konzeption und Anwendung von Entscheidungsnetzwerken im Städtebau,” Diploma Thesis, University of Bonn, 2010.
    [BibTeX]

    In dieser Arbeit wird mit dem Programm Netica ein Entscheidungsnetzwerk aufgestellt, das für sechs Gebäude eines Wohnkomplexes in Leipzig-Grünau die optimalen Entscheidungen über deren zukünftige Entwicklung ermittelt. In das Netzwerk werden die Interessen der Mieter, der Stadtverwaltung und der Wohnungsunternehmen Grünaus mit einbezogen, wobei mit letzeren Interviews über die Gewichtung der Einflussfaktoren im Stadtumbau geführt wurden. Netica eignet sich nur mit Einschränkungen für die Modellierung und Entscheidungsfindung im Stadtumbau, da nicht mehr als sechs Gebäude modelliert werden können und, genau wie mit allen anderen Entscheidungsnetzwerkprogrammen, die Darstellung des Free-Rider-Problems nicht möglich ist.

    @mastersthesis{roder-sorge2010konzeption,
    author = {R\"oder-Sorge, Marisa},
    title = {Konzeption und Anwendung von Entscheidungsnetzwerken im St\"adtebau},
    school = {University of Bonn},
    type = {Diploma Thesis},
    year = {2010},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr.-Ing. Theo K\"otter},
    abstract = {In dieser Arbeit wird mit dem Programm Netica ein Entscheidungsnetzwerk aufgestellt, das f\"ur sechs Geb\"aude eines Wohnkomplexes in Leipzig-Gr\"unau die optimalen Entscheidungen \"uber deren zuk\"unftige Entwicklung ermittelt. In das Netzwerk werden die Interessen der Mieter, der Stadtverwaltung und der Wohnungsunternehmen Gr\"unaus mit einbezogen, wobei mit letzeren Interviews \"uber die Gewichtung der Einflussfaktoren im Stadtumbau gef\"uhrt wurden. Netica eignet sich nur mit Einschr\"ankungen f\"ur die Modellierung und Entscheidungsfindung im Stadtumbau, da nicht mehr als sechs Geb\"aude modelliert werden k\"onnen und, genau wie mit allen anderen Entscheidungsnetzwerkprogrammen, die Darstellung des Free-Rider-Problems nicht m\"oglich ist.},
    }

  • R. Roscher, F. Schindler, and W. Förstner, “High Dimensional Correspondences from Low Dimensional Manifolds – An Empirical Comparison of Graph-based Dimensionality Reduction Algorithms,” in The 3rd International Workshop on Subspace Methods, in conjunction with ACCV2010, 2010, p. 10. doi:10.1007/978-3-642-22819-3_34
    [BibTeX] [PDF]

    We discuss the utility of dimensionality reduction algorithms to put data points in high dimensional spaces into correspondence by learning a transformation between assigned data points on a lower dimensional structure. We assume that similar high dimensional feature spaces are characterized by a similar underlying low dimensional structure. To enable the determination of an affine transformation between two data sets we make use of well-known dimensional reduction algorithms. We demonstrate this procedure for applications like classification and assignments between two given data sets and evaluate six well-known algorithms during several experiments with different objectives. We show that with these algorithms and our transformation approach high dimensional data sets can be related to each other. We also show that linear methods turn out to be more suitable for assignment tasks, whereas graph-based methods appear to be superior for classification tasks.

    @inproceedings{roscher2010high,
      author    = {Roscher, Ribana and Schindler, Falko and F\"orstner, Wolfgang},
      title     = {High Dimensional Correspondences from Low Dimensional Manifolds -- An Empirical Comparison of Graph-based Dimensionality Reduction Algorithms},
      booktitle = {The 3rd International Workshop on Subspace Methods, in conjunction with ACCV2010},
      year      = {2010},
      pages     = {10},
      note      = {Queenstown, New Zealand},
      doi       = {10.1007/978-3-642-22819-3_34},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Roscher2010High.pdf;Poster:Roscher2010High_Poster.pdf},
      abstract  = {We discuss the utility of dimensionality reduction algorithms to put data points in high dimensional spaces into correspondence by learning a transformation between assigned data points on a lower dimensional structure. We assume that similar high dimensional feature spaces are characterized by a similar underlying low dimensional structure. To enable the determination of an affine transformation between two data sets we make use of well-known dimensional reduction algorithms. We demonstrate this procedure for applications like classification and assignments between two given data sets and evaluate six well-known algorithms during several experiments with different objectives. We show that with these algorithms and our transformation approach high dimensional data sets can be related to each other. We also show that linear methods turn out to be more suitable for assignment tasks, whereas graph-based methods appear to be superior for classification tasks.},
    }

  • R. Roscher, B. Waske, and W. Förstner, “Kernel Discriminative Random Fields for land cover classification,” in IAPR Workshop on Pattern Recognition in Remote Sensing (PRRS), 2010. doi:10.1109/PRRS.2010.5742801
    [BibTeX] [PDF]

    Logistic Regression has become a commonly used classifier, not only due to its probabilistic output and its direct usage in multi-class cases. We use a sparse Kernel Logistic Regression approach – the Import Vector Machines – for land cover classification. We improve our segmentation results applying a Discriminative Random Field framework on the probabilistic classification output. We consider the performance regarding to the classification accuracy and the complexity and compare it to the Gaussian Maximum Likelihood classification and the Support Vector Machines.

    @InProceedings{roscher2010kernel,
    title = {Kernel Discriminative Random Fields for land cover classification},
    author = {Roscher, Ribana and Waske, Bj\"orn and F\"orstner, Wolfgang},
    booktitle = {IAPR Workshop on Pattern Recognition in Remote Sensing (PRRS)},
    year = {2010},
    note = {Istanbul, Turkey},
    abstract = {Logistic Regression has become a commonly used classifier, not only due to its probabilistic output and its direct usage in multi-class cases. We use a sparse Kernel Logistic Regression approach - the Import Vector Machines - for land cover classification. We improve our segmentation results applying a Discriminative Random Field framework on the probabilistic classification output. We consider the performance regarding to the classification accuracy and the complexity and compare it to the Gaussian Maximum Likelihood classification and the Support Vector Machines.},
    doi = {10.1109/PRRS.2010.5742801},
    keywords = {Gaussian maximum likelihood classification;image segmentation;import vector machine;kernel discriminative random fields;land cover classification;logistic regression;probabilistic classification;support vector machines;geophysical image processing;image classification;terrain mapping},
    owner = {waske},
    timestamp = {2012.09.05},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2010Kernel.pdf},
    }

  • J. Siegemund, D. Pfeiffer, U. Franke, and W. Förstner, “Curb Reconstruction using Conditional Random Fields,” in IEEE Intelligent Vehicles Symposium (IV), 2010, p. 203–210. doi:10.1109/IVS.2010.5548096
    [BibTeX] [PDF]

    This paper presents a generic framework for curb detection and reconstruction in the context of driver assistance systems. Based on a 3D point cloud, we estimate the parameters of a 3D curb model, incorporating also the curb adjacent surfaces, e.g. street and sidewalk. We apply an iterative two step approach. First, the measured 3D points, e.g., obtained from dense stereo vision, are assigned to the curb adjacent surfaces using loopy belief propagation on a Conditional Random Field. Based on this result, we reconstruct the surfaces and in particular the curb. Our system is not limited to straight-line curbs, i.e. it is able to deal with curbs of different curvature and varying height. The proposed algorithm runs in real-time on our demonstrator vehicle and is evaluated in urban real-world scenarios. It yields highly accurate results even for low curbs up to 20 m distance.

    @InProceedings{siegemund2010curb,
    title = {Curb Reconstruction using Conditional Random Fields},
    author = {Siegemund, Jan and Pfeiffer, David and Franke, Uwe and F\"orstner, Wolfgang},
    booktitle = {IEEE Intelligent Vehicles Symposium (IV)},
    year = {2010},
    month = jun,
    pages = {203--210},
    publisher = {IEEE Computer Society},
    abstract = {This paper presents a generic framework for curb detection and reconstruction in the context of driver assistance systems. Based on a 3D point cloud, we estimate the parameters of a 3D curb model, incorporating also the curb adjacent surfaces, e.g. street and sidewalk. We apply an iterative two step approach. First, the measured 3D points, e.g., obtained from dense stereo vision, are assigned to the curb adjacent surfaces using loopy belief propagation on a Conditional Random Field. Based on this result, we reconstruct the surfaces and in particular the curb. Our system is not limited to straight-line curbs, i.e. it is able to deal with curbs of different curvature and varying height. The proposed algorithm runs in real-time on our demonstrator vehicle and is evaluated in urban real-world scenarios. It yields highly accurate results even for low curbs up to 20 m distance.},
    doi = {10.1109/IVS.2010.5548096},
    url = {https://www.ipb.uni-bonn.de/pdfs/Siegemund2010Curb.pdf},
    }

  • R. Steffen, J. Frahm, and W. Förstner, “Relative Bundle Adjustment based on Trifocal Constraints,” in ECCV Workshop on Reconstruction and Modeling of Large-Scale 3D Virtual Environments, 2010. doi:10.1007/978-3-642-35740-4_22
    [BibTeX] [PDF]

    In this paper we propose a novel approach to bundle adjustment for large-scale camera configurations. The method does not need to include the 3D points in the optimization as parameters. Additionally, we model the parameters of a camera only relative to a nearby camera to achieve a stable estimation of all cameras. This guarantees to yield a normal equation system with a numerical condition, which practically is independent of the number of images. Secondly, instead of using the classical perspective relation between object point, camera and image point, we use epipolar and trifocal constraints to implicitly establish the relations between the cameras via the object structure. This avoids the explicit reference to 3D points thereby handling points far from the camera in a numerically stable fashion. We demonstrate the resulting stability and high convergence rates using synthetic and real data.

    @inproceedings{steffen2010relative,
      author       = {Steffen, Richard and Frahm, Jan-Michael and F\"orstner, Wolfgang},
      title        = {Relative Bundle Adjustment based on Trifocal Constraints},
      booktitle    = {ECCV Workshop on Reconstruction and Modeling of Large-Scale 3D Virtual Environments},
      year         = {2010},
      organization = {ECCV 2010 Crete, Greece},
      doi          = {10.1007/978-3-642-35740-4_22},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Steffen2010Relative.pdf},
      abstract     = {In this paper we propose a novel approach to bundle adjustment for large-scale camera configurations. The method does not need to include the 3D points in the optimization as parameters. Additionally, we model the parameters of a camera only relative to a nearby camera to achieve a stable estimation of all cameras. This guarantees to yield a normal equation system with a numerical condition, which practically is independent of the number of images. Secondly, instead of using the classical perspective relation between object point, camera and image point, we use epipolar and trifocal constraints to implicitly establish the relations between the cameras via the object structure. This avoids the explicit reference to 3D points thereby handling points far from the camera in a numerically stable fashion. We demonstrate the resulting stability and high convergence rates using synthetic and real data.},
    }

  • J. Sturm, A. Jain, C. Stachniss, C. C. Kemp, and W. Burgard, “Robustly Operating Articulated Objects based on Experience,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Taipei, Taiwan, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{sturm2010b,
      author    = {J. Sturm and A. Jain and C. Stachniss and C.C. Kemp and W. Burgard},
      title     = {Robustly Operating Articulated Objects based on Experience},
      booktitle = iros,
      year      = {2010},
      address   = {Taipei, Taiwan},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm10iros.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • J. Sturm, K. Konolige, C. Stachniss, and W. Burgard, “Vision-based Detection for Learning Articulation Models of Cabinet Doors and Drawers in Household Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Anchorage, Alaska, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{sturm2010,
      author    = {J. Sturm and K. Konolige and C. Stachniss and W. Burgard},
      title     = {Vision-based Detection for Learning Articulation Models of Cabinet Doors and Drawers in Household Environments},
      booktitle = icra,
      year      = {2010},
      address   = {Anchorage, Alaska},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm10icra.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • J. Sturm, K. Konolige, C. Stachniss, and W. Burgard, “3D Pose Estimation, Tracking and Model Learning of Articulated Objects from Dense Depth Video using Projected Texture Stereo,” in Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS), Zaragoza, Spain, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{sturm2010a,
      author    = {J. Sturm and K. Konolige and C. Stachniss and W. Burgard},
      title     = {3D Pose Estimation, Tracking and Model Learning of Articulated Objects from Dense Depth Video using Projected Texture Stereo},
      booktitle = {Proc. of the Workshop RGB-D: Advanced Reasoning with Depth Cameras at Robotics: Science and Systems (RSS)},
      year      = {2010},
      address   = {Zaragoza, Spain},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sturm10rssws.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • S. Valero, J. Chanussot, J. A. Benediktsson, H. Talbot, and B. Waske, “Advanced directional mathematical morphology for the detection of the road network in very high resolution remote sensing images,” Pattern Recognition Letters, vol. 31, iss. 10, p. 1120–1127, 2010. doi:10.1016/j.patrec.2009.12.018
    [BibTeX]

    Very high spatial resolution (VHR) images allow to feature man-made structures such as roads and thus enable their accurate analysis. Geometrical characteristics can be extracted using mathematical morphology. However, the prior choice of a reference shape (structuring element) introduces a shape-bias. This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Path Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape. As a consequence, they outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Path Closing to construct Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based. (C) 2009 Published by Elsevier B.V.

    @Article{valero2010advanced,
    title = {Advanced directional mathematical morphology for the detection of the road network in very high resolution remote sensing images},
    author = {Valero, Silvia and Chanussot, Jocelyn and Benediktsson, Jon Atli and Talbot, Hugues and Waske, Bj\"orn},
    journal = {Pattern Recognition Letters},
    year = {2010},
    month = jul,
    number = {10},
    pages = {1120--1127},
    volume = {31},
    abstract = {Very high spatial resolution (VHR) images allow to feature man-made structures such as roads and thus enable their accurate analysis. Geometrical characteristics can be extracted using mathematical morphology. However, the prior choice of a reference shape (structuring element) introduces a shape-bias. This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Path Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape. As a consequence, they outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Path Closing to construct Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based. (C) 2009 Published by Elsevier B.V.},
    doi = {10.1016/j.patrec.2009.12.018},
    owner = {waske},
    si = {SI},
    sn = {0167-8655},
    tc = {4},
    timestamp = {2012.09.04},
    ut = {WOS:000279284000007},
    z8 = {2},
    z9 = {6},
    zb = {1},
    }

  • B. Waske, S. van der Linden, J. A. Benediktsson, A. Rabe, and P. Hostert, “Sensitivity of Support Vector Machines to Random Feature Selection in Classification of Hyperspectral Data,” IEEE Transactions on Geoscience and Remote Sensing, vol. 48, iss. 7, p. 2880–2889, 2010. doi:10.1109/TGRS.2010.2041784
    [BibTeX]

    The accuracy of supervised land cover classifications depends on factors such as the chosen classification algorithm, adequate training data, the input data characteristics, and the selection of features. Hyperspectral imaging provides more detailed spectral and spatial information on the land cover than other remote sensing resources. Over the past ten years, traditional and formerly widely accepted statistical classification methods have been superseded by more recent machine learning algorithms, e.g., support vector machines (SVMs), or by multiple classifier systems (MCS). This can be explained by limitations of statistical approaches with regard to high-dimensional data, multimodal classes, and often limited availability of training data. In the presented study, MCSs based on SVM and random feature selection (RFS) are applied to explore the potential of a synergetic use of the two concepts. We investigated how the number of selected features and the size of the MCS influence classification accuracy using two hyperspectral data sets, from different environmental settings. In addition, experiments were conducted with a varying number of training samples. Accuracies are compared with regular SVM and random forests. Experimental results clearly demonstrate that the generation of an SVM-based classifier system with RFS significantly improves overall classification accuracy as well as producer’s and user’s accuracies. In addition, the ensemble strategy results in smoother, i.e., more realistic, classification maps than those from stand-alone SVM. Findings from the experiments were successfully transferred onto an additional hyperspectral data set.

    @article{waske2010sensitivity,
      author    = {Waske, Bj\"orn and van der Linden, Sebastian and Benediktsson, Jon Atli and Rabe, Andreas and Hostert, Patrick},
      title     = {Sensitivity of Support Vector Machines to Random Feature Selection in Classification of Hyperspectral Data},
      journal   = {IEEE Transactions on Geoscience and Remote Sensing},
      year      = {2010},
      volume    = {48},
      number    = {7},
      pages     = {2880--2889},
      month     = jul,
      doi       = {10.1109/TGRS.2010.2041784},
      abstract  = {The accuracy of supervised land cover classifications depends on factors such as the chosen classification algorithm, adequate training data, the input data characteristics, and the selection of features. Hyperspectral imaging provides more detailed spectral and spatial information on the land cover than other remote sensing resources. Over the past ten years, traditional and formerly widely accepted statistical classification methods have been superseded by more recent machine learning algorithms, e.g., support vector machines (SVMs), or by multiple classifier systems (MCS). This can be explained by limitations of statistical approaches with regard to high-dimensional data, multimodal classes, and often limited availability of training data. In the presented study, MCSs based on SVM and random feature selection (RFS) are applied to explore the potential of a synergetic use of the two concepts. We investigated how the number of selected features and the size of the MCS influence classification accuracy using two hyperspectral data sets, from different environmental settings. In addition, experiments were conducted with a varying number of training samples. Accuracies are compared with regular SVM and random forests. Experimental results clearly demonstrate that the generation of an SVM-based classifier system with RFS significantly improves overall classification accuracy as well as producer's and user's accuracies. In addition, the ensemble strategy results in smoother, i.e., more realistic, classification maps than those from stand-alone SVM. Findings from the experiments were successfully transferred onto an additional hyperspectral data set.},
      owner     = {waske},
      sn        = {0196-2892},
      tc        = {10},
      timestamp = {2012.09.04},
      ut        = {WOS:000281789800010},
      z8        = {0},
      z9        = {10},
      zb        = {2},
    }

  • S. Wenzel and L. Hotz, “The Role of Sequences for Incremental Learning,” in ICAART 2010 – Proc. of the International Conf. on Agents and Artificial Intelligence, Valencia, Spain, 2010, p. 434–439.
    [BibTeX] [PDF]

    In this paper, we point out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. We show the influence of sequence for two different types of incremental learning. One is aimed on learning structural models, the other on learning models to discriminate object classes. In both cases, we show the possibility to find good sequences before starting the training.

    @inproceedings{wenzel2010role,
      author    = {Wenzel, Susanne and Hotz, Lothar},
      title     = {The Role of Sequences for Incremental Learning},
      booktitle = {ICAART 2010 - Proc. of the International Conf. on Agents and Artificial Intelligence},
      year      = {2010},
      editor    = {Joaquim Filipe and Ana L. N. Fred and Bernadette Sharp},
      volume    = {1},
      pages     = {434--439},
      month     = jan,
      address   = {Valencia, Spain},
      publisher = {INSTICC Press},
      isbn      = {978-989-674-021-4},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2010Role.pdf},
      abstract  = {In this paper, we point out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. We show the influence of sequence for two different types of incremental learning. One is aimed on learning structural models, the other on learning models to discriminate object classes. In both cases, we show the possibility to find good sequences before starting the training.},
      timestamp = {2011.01.18},
    }

  • K. M. Wurm, C. Dornhege, P. Eyerich, C. Stachniss, B. Nebel, and W. Burgard, “Coordinated Exploration with Marsupial Teams of Robots using Temporal Symbolic Planning,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Taipei, Taiwan, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{wurm2010a,
      author    = {K.M. Wurm and C. Dornhege and P. Eyerich and C. Stachniss and B. Nebel and W. Burgard},
      title     = {Coordinated Exploration with Marsupial Teams of Robots using Temporal Symbolic Planning},
      booktitle = iros,
      year      = {2010},
      address   = {Taipei, Taiwan},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm10iros.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • K. M. Wurm, A. Hornung, M. Bennewitz, C. Stachniss, and W. Burgard, “OctoMap: A Probabilistic, Flexible, and Compact 3D Map Representation for Robotic Systems,” in Proc. of the ICRA 2010 Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation, Anchorage, AK, USA, 2010.
    [BibTeX] [PDF]
    [none]
    @inproceedings{wurm2010,
      author    = {K.M. Wurm and A. Hornung and M. Bennewitz and C. Stachniss and W. Burgard},
      title     = {{OctoMap}: A Probabilistic, Flexible, and Compact {3D} Map Representation for Robotic Systems},
      booktitle = {Proc. of the ICRA 2010 Workshop on Best Practice in 3D Perception and Modeling for Mobile Manipulation},
      year      = {2010},
      address   = {Anchorage, AK, USA},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm10icraws.pdf},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • K. M. Wurm, C. Stachniss, and G. Grisetti, “Bridging the Gap Between Feature- and Grid-based SLAM,” Journal on Robotics and Autonomous Systems (RAS), vol. 58, iss. 2, pp. 140-148, 2010. doi:10.1016/j.robot.2009.09.009
    [BibTeX] [PDF]
    [none]
    @Article{wurm2010b,
    title = {Bridging the Gap Between Feature- and Grid-based SLAM},
    author = {Wurm, K.M. and Stachniss, C. and Grisetti, G.},
    journal = jras,
    year = {2010},
    number = {2},
    pages = {140--148},
    volume = {58},
    abstract = {[none]},
    doi = {10.1016/j.robot.2009.09.009},
    issn = {0921-8890},
    timestamp = {2014.04.24},
    url = {https://ais.informatik.uni-freiburg.de/publications/papers/wurm10ras.pdf},
    }

  • M. Y. Yang, Y. Cao, W. Förstner, and J. McDonald, “Robust wide baseline scene alignment based on 3D viewpoint normalization,” in International Conf. on Advances in Visual Computing, 2010, p. 654–665. doi:10.1007/978-3-642-17289-2_63
    [BibTeX] [PDF]

    This paper presents a novel scheme for automatically aligning two widely separated 3D scenes via the use of viewpoint invariant features. The key idea of the proposed method is following. First, a number of dominant planes are extracted in the SfM 3D point cloud using a novel method integrating RANSAC and MDL to describe the underlying 3D geometry in urban settings. With respect to the extracted 3D planes, the original camera viewing directions are rectified to form the front-parallel views of the scene. Viewpoint invariant features are extracted on the canonical views to provide a basis for further matching. Compared to the conventional 2D feature detectors (e.g. SIFT, MSER), the resulting features have following advantages: (1) they are very discriminative and robust to perspective distortions and viewpoint changes due to exploiting scene structure; (2) the features contain useful local patch information which allow for efficient feature matching. Using the novel viewpoint invariant features, wide-baseline 3D scenes are automatically aligned in terms of robust image matching. The performance of the proposed method is comprehensively evaluated in our experiments. It’s demonstrated that 2D image feature matching can be significantly improved by considering 3D scene structure.

    @InProceedings{yang2010robust,
    title = {Robust wide baseline scene alignment based on {3D} viewpoint normalization},
    author = {Yang, Michael Ying and Cao, Yanpeng and F\"orstner, Wolfgang and McDonald, John},
    booktitle = {International Conf. on Advances in Visual Computing},
    year = {2010},
    pages = {654--665},
    publisher = {Springer-Verlag},
    abstract = {This paper presents a novel scheme for automatically aligning two widely separated 3D scenes via the use of viewpoint invariant features. The key idea of the proposed method is following. First, a number of dominant planes are extracted in the SfM 3D point cloud using a novel method integrating RANSAC and MDL to describe the underlying 3D geometry in urban settings. With respect to the extracted 3D planes, the original camera viewing directions are rectified to form the front-parallel views of the scene. Viewpoint invariant features are extracted on the canonical views to provide a basis for further matching. Compared to the conventional 2D feature detectors (e.g. SIFT, MSER), the resulting features have following advantages: (1) they are very discriminative and robust to perspective distortions and viewpoint changes due to exploiting scene structure; (2) the features contain useful local patch information which allow for efficient feature matching. Using the novel viewpoint invariant features, wide-baseline 3D scenes are automatically aligned in terms of robust image matching. The performance of the proposed method is comprehensively evaluated in our experiments. It's demonstrated that 2D image feature matching can be significantly improved by considering 3D scene structure.},
    doi = {10.1007/978-3-642-17289-2_63},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2010Robust.pdf},
    }

  • M. Y. Yang and W. Förstner, “Plane Detection in Point Cloud Data,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2010-01, 2010.
    [BibTeX] [PDF]

    Plane detection is a prerequisite to a wide variety of vision tasks. RANdom SAmple Consensus (RANSAC) algorithm is widely used for plane detection in point cloud data. Minimum description length (MDL) principle is used to deal with several competing hypothesis. This paper presents a new approach to the plane detection by integrating RANSAC and MDL. The method could avoid detecting wrong planes due to the complex geometry of the 3D data. The paper tests the performance of proposed method on both synthetic and real data.

    @TechReport{yang2010plane,
    title = {Plane Detection in Point Cloud Data},
    author = {Yang, Michael Ying and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2010},
    number = {TR-IGG-P-2010-01},
    abstract = {Plane detection is a prerequisite to a wide variety of vision tasks. RANdom SAmple Consensus (RANSAC) algorithm is widely used for plane detection in point cloud data. Minimum description length (MDL) principle is used to deal with several competing hypothesis. This paper presents a new approach to the plane detection by integrating RANSAC and MDL. The method could avoid detecting wrong planes due to the complex geometry of the 3D data. The paper tests the performance of proposed method on both synthetic and real data.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Yang2010Plane.pdf},
    }

  • M. Y. Yang, W. Förstner, and M. Drauschke, “Hierarchical Conditional Random Field for Multi-class Image Classification,” in International Conf. on Computer Vision Theory and Applications (VISSAPP), 2010, p. 464–469.
    [BibTeX] [PDF]

    Multi-class image classification has made significant advances in recent years through the combination of local and global features. This paper proposes a novel approach called hierarchical conditional random field (HCRF) that explicitly models region adjacency graph and region hierarchy graph structure of an image. This allows to set up a joint and hierarchical model of local and global discriminative methods that augments conditional random field to a multi-layer model. Region hierarchy graph is based on a multi-scale watershed segmentation.

    @inproceedings{yang2010hierarchical,
      author    = {Yang, Michael Ying and F\"orstner, Wolfgang and Drauschke, Martin},
      title     = {Hierarchical Conditional Random Field for Multi-class Image Classification},
      booktitle = {International Conf. on Computer Vision Theory and Applications (VISSAPP)},
      year      = {2010},
      pages     = {464--469},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Yang2011Hierarchical.pdf},
      abstract  = {Multi-class image classification has made significant advances in recent years through the combination of local and global features. This paper proposes a novel approach called hierarchical conditional random field (HCRF) that explicitly models region adjacency graph and region hierarchy graph structure of an image. This allows to set up a joint and hierarchical model of local and global discriminative methods that augments conditional random field to a multi-layer model. Region hierarchy graph is based on a multi-scale watershed segmentation.},
    }

2009

  • B. Abendroth and M. zur Mühlen, “Genauigkeitsbeurteilung und Untersuchungen der Zuverlässigkeit von optischen Onlinemessungen,” Diploma Thesis, 2009.
    [BibTeX] [PDF]

    Vorwort Der Titel “Genauigkeitsbeurteilung und Untersuchungen der Zuverlässigkeit von optischen Onlinemessungen” impliziert eine weite Bandbreite an Untersuchungsmöglichkeiten. Diese allgemeine Einführung gibt einen Überblick über die untersuchten Aspekte dieser Arbeit. Neben der Motivation, die zu der Entstehung dieser Diplomarbeit geführt hat, beinhaltet diese Einführung eine grobe Gliederung der behandelten Themenschwerpunkte. Motivation Ein neues Aufgabengebiet innerhalb der Nahbereichsphotogrammetrie bietet die Konzeption von photogrammetrischen Messsystemen für industrielle Aufgabenstellungen. Die Firma AICON 3D Systems GmbH, mit deren Kooperation diese Arbeit entstand, hat sich auf die Entwicklung solcher Systeme spezialisiert. Sie gehört zu den weltweit führenden Unternehmen im Bereich der optischen kamerabasierten 3D-Vermessung. Ihr Anspruch ist es, hochgenaue und effiziente Produkte im Bereich von Inspektion und Prüfung zu entwickeln und zu überwachen. Ihre Produkte vertreibt das 1990 gegründete Unternehmen überwiegend in der Automobil-, Luft- und Raumfahrtindustrie sowie im Anlagen- und Schiffsbau. Zur Erfassung von dynamischen Vorgängen bietet das Unternehmen echtzeitfähige optische Messsysteme an, die je nach Konfiguration in der Lage sind einzelne signalisierte Punkte als 3D-Koordinaten zu erfassen oder die Bewegung eines Starrkörpers aufzunehmen. Damit diese photogrammetrischen Systeme gegenüber anderen Messsystemen im Konkurrenzkampf bestehen können, müssen sich diese einer ständigen Weiterentwicklung und Verbesserung unterziehen. Dabei steht insbesondere die Wirtschaftlichkeit, Zuverlässigkeit und die Genauigkeit der Systeme im Vordergrund. Außerdem ist es nötig einzelne Messsysteme zu charakterisieren, um sie vergleichbar zu machen und die Einsatzmöglichkeiten aufzuzeigen. 
Dazu gehört neben den oben genannten Kriterien der Genauigkeit, Zuverlässigkeit und Wirtschaftlichkeit auch das Spektrum der Einsatzmöglichkeiten mit systemspezifischen Rahmenbedingungen. Des Weiteren kann ein Vergleich über Hardware- und Software-Module geschehen. Im weiteren Verlauf dieser Arbeit werden die Eigenschaften der Genauigkeit und der Software-Module näher untersucht. Dabei zeigt eine Genauigkeitsuntersuchung die Grenzen der Messsysteme auf, deren Kenntnis für die Weiterentwicklung von Bedeutung ist. Für die Verbesserung der Software wird diese anhand ihrer vorhandenen Algorithmik untersucht und mit alternativen Berechnungsverfahren verglichen. Als Ausgangspunkt für diese Untersuchungen dienen dabei die beiden Onlinemesssysteme WHEELWATCH und MoveINSPECT der Firma AICON 3D Systems GmbH. Die Motivation der Firma AICON 3D Systems GmbH ein Diplomarbeitsthema im Bereich einer Genauigkeitsuntersuchung zu stellen, liegt darin, das vorhandene theoretische Wissen der Universität mit dem praktischen Anwendungsbeispiel der Onlinemesssysteme zu verbinden. Dies gilt auch für den Bereich der Weiterentwicklung der Algorithmik. Damit die Systeme auch in Zukunft wettbewerbsfähig sind, müssen diese ständig weiter entwickelt werden. Aus diesem Grund beinhaltet diese Arbeit die Untersuchung von zwei verschiedenen Problemstellungen, die sich innerhalb der Algorithmen der Systeme ergeben. Aufgabenstellung Das Ziel dieser gesamten Diplomarbeit besteht in der Verbesserung und Weiterentwicklung von Onlinemesssystemen. Dabei sollen theoretische Verfahren, die an der Universität vermittelt werden, auf die speziellen Messsysteme WHEELWATCH und MoveINSPECT der Firma AICON 3D Systems GmbH angepasst und modifiziert werden. Insbesondere geht es um drei Aspekte der Onlinemesssysteme. Als erstes soll eine Genauigkeitsuntersuchung der Onlinemesssysteme in Hinblick auf die Bewegungserfassung von Starrkörpern durchgeführt werden.
Hierbei dienen statistische Grundlagen dazu, die bisherigen Genauigkeitsangaben von AICON 3D Systems GmbH durch eine statistisch fundierte Angabe zu validieren. Die allgemeine Problemstellung bezieht sich auf die Entwicklung eines Testverfahrens für die Spezifikation von Genauigkeitsangaben der detektierten Bewegung von starren Objekten, die sich im Nahbereich des Messsystems befinden. Unter Nahbereich ist hier eine maximale Entfernung von bis zu 3m zu verstehen. Die nächsten zwei Teilaspekte bestehen in der Beurteilung der bestehenden Algorithmik und dessen Verbesserung durch alternative Lösungsansätze. Hier handelt es sich um zwei allgemeine Probleme. Für das Einkamerasystem werden direkte Lösungsmöglichkeiten des Räumlichen Rückwärtsschnittes aufgezeigt. Im Fall eines Zweikamerasystems findet eine Verbesserung der Punktzuordnung statt. Diese basiert insbesondere auf der Berücksichtigung der Oberfläche der Objekte. Diese Teilaufgaben werden in den einleitenden Abschnitten der drei Teile genau spezifiziert. Aufbau der Arbeit Diese Diplomarbeit befasst sich mit drei verschiedenen Aspekten der Onlinemesssysteme WHEELWATCH und MoveINSPECT. Aus diesem Grund besteht dieses Dokument aus drei großen Teilen. Der Teil I trägt den Titel “Genauigkeitsbeurteilung von optischen Onlinemesssystemen”. Darunter befindet sich die Entwicklung von zwei Testverfahren, die speziell für die Systeme WHEELWATCH und MoveINSPECT konzipiert werden. Dabei beziehen sich die Testverfahren zur Untersuchung der Genauigkeit zum einen auf Strecken und zum anderen auf Winkel. Die genaue Vorgehensweise ist dem ersten Teil dieser Arbeit zu entnehmen. Die beiden folgenden Teile befassen sich mit der Verbesserung der Algorithmik der Onlinemesssysteme. Dabei stellt der Teil II Alternativen zum Räumlichen Rückwärtsschnitt (RRS) des Systems WHEELWATCH vor.
Das Ziel dieses Abschnitts ist es das bisher von der Firma AICON 3D Systems GmbH implementierte iterative Verfahren des RRS durch ein direktes zu erweitern. Die direkte Lösung des RRS dient dann zur Bestimmung der Näherungswerte für das iterative Verfahren. Mit der Algorithmik des Systems MoveINSPECT befasst sich der Teil III. Hier werden neue Ansatzmöglichkeiten aufgezeigt, um das Problem der Zuordnung von uncodierten Marken bei einem Zweikamerasystem zu verringern.

    @MastersThesis{abendroth2009genauigkeitsbeurteilung,
    title = {Genauigkeitsbeurteilung und Untersuchungen der Zuverl\"assigkeit von optischen Onlinemessungen},
    author = {Abendroth, Birgit and zur M\"uhlen, Miriam},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Timo Dickscheid, Dipl.-Ing. Robert Godding},
    type = {Diploma Thesis},
    abstract = {Vorwort Der Titel "Genauigkeitsbeurteilung und Untersuchungen der Zuverl\"assigkeit von optischen Onlinemessungen" impliziert eine weite Bandbreite an Untersuchungsm\"oglichkeiten. Diese allgemeine Einf\"uhrung gibt einen \"Uberblick \"uber die untersuchten Aspekte dieser Arbeit. Neben der Motivation, die zu der Entstehung dieser Diplomarbeit gef\"uhrt hat, beinhaltet diese Einf\"uhrung eine grobe Gliederung der behandelten Themenschwerpunkte. Motivation Ein neues Aufgabengebiet innerhalb der Nahbereichsphotogrammetrie bietet die Konzeption von photogrammetrischen Messsystemen f\"ur industrielle Aufgabenstellungen. Die Firma AICON 3D Systems GmbH, mit deren Kooperation diese Arbeit entstand, hat sich auf die Entwicklung solcher Systeme spezialisiert. Sie geh\"ort zu den weltweit f\"uhrenden Unternehmen im Bereich der optischen kamerabasierten 3D-Vermessung. Ihr Anspruch ist es, hochgenaue und effiziente Produkte im Bereich von Inspektion und Pr\"ufung zu entwickeln und zu \"uberwachen. Ihre Produkte vertreibt das 1990 gegr\"undete Unternehmen \"uberwiegend in der Automobil-, Luft- und Raumfahrtindustrie sowie im Anlagen- und Schiffsbau. Zur Erfassung von dynamischen Vorg\"angen bietet das Unternehmen echtzeitf\"ahige optische Messsysteme an, die je nach Konfiguration in der Lage sind einzelne signalisierte Punkte als 3D-Koordinaten zu erfassen oder die Bewegung eines Starrk\"orpers aufzunehmen. Damit diese photogrammetrischen Systeme gegen\"uber anderen Messsystemen im Konkurrenzkampf bestehen k\"onnen, m\"ussen sich diese einer st\"andigen Weiterentwicklung und Verbesserung unterziehen. Dabei steht insbesondere die Wirtschaftlichkeit, Zuverl\"assigkeit und die Genauigkeit der Systeme im Vordergrund. Au{\ss}erdem ist es n\"otig einzelne Messsysteme zu charakterisieren, um sie vergleichbar zu machen und die Einsatzm\"oglichkeiten aufzuzeigen. Dazu geh\"ort neben den oben genannten Kriterien der Genauigkeit, Zuverl\"assigkeit und Wirtschaftlichkeit auch das Spektrum der Einsatzm\"oglichkeiten mit systemspezifischen Rahmenbedingungen. Des Weiteren kann ein Vergleich \"uber Hardware- und Software-Module geschehen. Im weiteren Verlauf dieser Arbeit werden die Eigenschaften der Genauigkeit und der Software-Module n\"aher untersucht. Dabei zeigt eine Genauigkeitsuntersuchung die Grenzen der Messsysteme auf, deren Kenntnis f\"ur die Weiterentwicklung von Bedeutung ist. F\"ur die Verbesserung der Software wird diese anhand ihrer vorhandenen Algorithmik untersucht und mit alternativen Berechnungsverfahren verglichen. Als Ausgangspunkt f\"ur diese Untersuchungen dienen dabei die beiden Onlinemesssysteme WHEELWATCH und MoveINSPECT der Firma AICON 3D Systems GmbH. Die Motivation der Firma AICON 3D Systems GmbH ein Diplomarbeitsthema im Bereich einer Genauigkeitsuntersuchung zu stellen, liegt darin, das vorhandene theoretische Wissen der Universit\"at mit dem praktischen Anwendungsbeispiel der Onlinemesssysteme zu verbinden. Dies gilt auch f\"ur den Bereich der Weiterentwicklung der Algorithmik. Damit die Systeme auch in Zukunft wettbewerbsf\"ahig sind, m\"ussen diese st\"andig weiter entwickelt werden. Aus diesem Grund beinhaltet diese Arbeit die Untersuchung von zwei verschiedenen Problemstellungen, die sich innerhalb der Algorithmen der Systeme ergeben. Aufgabenstellung Das Ziel dieser gesamten Diplomarbeit besteht in der Verbesserung und Weiterentwicklung von Onlinemesssystemen. Dabei sollen theoretische Verfahren, die an der Universit\"at vermittelt werden, auf die speziellen Messsysteme WHEELWATCH und MoveINSPECT der Firma AICON 3D Systems GmbH angepasst und modifiziert werden. Insbesondere geht es um drei Aspekte der Onlinemesssysteme. Als erstes soll eine Genauigkeitsuntersuchung der Onlinemesssysteme in Hinblick auf die Bewegungserfassung von Starrk\"orpern durchgef\"uhrt werden. Hierbei dienen statistische Grundlagen dazu, die bisherigen Genauigkeitsangaben von AICON 3D Systems GmbH durch eine statistisch fundierte Angabe zu validieren. Die allgemeine Problemstellung bezieht sich auf die Entwicklung eines Testverfahrens f\"ur die Spezifikation von Genauigkeitsangaben der detektierten Bewegung von starren Objekten, die sich im Nahbereich des Messsystems befinden. Unter Nahbereich ist hier eine maximale Entfernung von bis zu 3m zu verstehen. Die n\"achsten zwei Teilaspekte bestehen in der Beurteilung der bestehenden Algorithmik und dessen Verbesserung durch alternative L\"osungsans\"atze. Hier handelt es sich um zwei allgemeine Probleme. F\"ur das Einkamerasystem werden direkte L\"osungsm\"oglichkeiten des R\"aumlichen R\"uckw\"artsschnittes aufgezeigt. Im Fall eines Zweikamerasystems findet eine Verbesserung der Punktzuordnung statt. Diese basiert insbesondere auf der Ber\"ucksichtigung der Oberfl\"ache der Objekte. Diese Teilaufgaben werden in den einleitenden Abschnitten der drei Teile genau spezifiziert. Aufbau der Arbeit Diese Diplomarbeit befasst sich mit drei verschiedenen Aspekten der Onlinemesssysteme WHEELWATCH und MoveINSPECT. Aus diesem Grund besteht dieses Dokument aus drei gro{\ss}en Teilen. Der Teil I tr\"agt den Titel "Genauigkeitsbeurteilung von optischen Onlinemesssystemen". Darunter befindet sich die Entwicklung von zwei Testverfahren, die speziell f\"ur die Systeme WHEELWATCH und MoveINSPECT konzipiert werden. Dabei beziehen sich die Testverfahren zur Untersuchung der Genauigkeit zum einen auf Strecken und zum anderen auf Winkel. Die genaue Vorgehensweise ist dem ersten Teil dieser Arbeit zu entnehmen. Die beiden folgenden Teile befassen sich mit der Verbesserung der Algorithmik der Onlinemesssysteme. Dabei stellt der Teil II Alternativen zum R\"aumlichen R\"uckw\"artsschnitt (RRS) des Systems WHEELWATCH vor. Das Ziel dieses Abschnitts ist es das bisher von der Firma AICON 3D Systems GmbH implementierte iterative Verfahren des RRS durch ein direktes zu erweitern. Die direkte L\"osung des RRS dient dann zur Bestimmung der N\"aherungswerte f\"ur das iterative Verfahren. Mit der Algorithmik des Systems MoveINSPECT befasst sich der Teil III. Hier werden neue Ansatzm\"oglichkeiten aufgezeigt, um das Problem der Zuordnung von uncodierten Marken bei einem Zweikamerasystem zu verringern.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Abendroth2009Genauigkeitsbeurteilung.pdf},
    }

  • A. Barth, J. Siegemund, U. Franke, and W. Förstner, “Simultaneous Estimation of Pose and Motion at Highly Dynamic Turn Maneuvers,” in 31st Annual Symposium of the German Association for Pattern Recognition (DAGM), Jena, Germany, 2009, p. 262–271. doi:10.1007/978-3-642-03798-6_27
    [BibTeX] [PDF]

    The (Extended) Kalman filter has been established as a standard method for object tracking. While a constraining motion model stabilizes the tracking results given noisy measurements, it limits the ability to follow an object in non-modeled maneuvers. In the context of a stereo-vision based vehicle tracking approach, we propose and compare three different strategies to automatically adapt the dynamics of the filter to the dynamics of the object. These strategies include an IMM-based multi-filter setup, an extension of the motion model considering higher order terms, as well as the adaptive parametrization of the filter variances using an independent maximum likelihood estimator. For evaluation, various recorded real world trajectories and simulated maneuvers, including skidding, are used. The experimental results show significant improvements in the simultaneous estimation of pose and motion.

    @InProceedings{barth2009simultaneous,
    title = {Simultaneous Estimation of Pose and Motion at Highly Dynamic Turn Maneuvers},
    author = {Barth, Alexander and Siegemund, Jan and Franke, Uwe and F\"orstner, Wolfgang},
    booktitle = {31st Annual Symposium of the German Association for Pattern Recognition ({DAGM})},
    year = {2009},
    address = {Jena, Germany},
    editor = {Denzler, J. and Notni, G.},
    pages = {262--271},
    publisher = {Springer},
    abstract = {The (Extended) Kalman filter has been established as a standard method for object tracking. While a constraining motion model stabilizes the tracking results given noisy measurements, it limits the ability to follow an object in non-modeled maneuvers. In the context of a stereo-vision based vehicle tracking approach, we propose and compare three different strategies to automatically adapt the dynamics of the filter to the dynamics of the object. These strategies include an IMM-based multi-filter setup, an extension of the motion model considering higher order terms, as well as the adaptive parametrization of the filter variances using an independent maximum likelihood estimator. For evaluation, various recorded real world trajectories and simulated maneuvers, including skidding, are used. The experimental results show significant improvements in the simultaneous estimation of pose and motion.},
    doi = {10.1007/978-3-642-03798-6_27},
    url = {https://www.ipb.uni-bonn.de/pdfs/Bart2009Simultaneous.pdf},
    }

  • S. D. Bauer, F. Korč, and W. Förstner, “Investigation into the classification of diseases of sugar beet leaves using multispectral images,” in Precision Agriculture 2009, Wageningen, 2009, p. 229–238.
    [BibTeX] [PDF]

    This paper reports on methods for the automatic detection and classification of leaf diseases based on high resolution multispectral images. Leaf diseases are economically important as they could cause a yield loss. Early and reliable detection of leaf diseases therefore is of utmost practical relevance – especially in the context of precision agriculture for localized treatment with fungicides. Our interest is the analysis of sugar beet due to their economical impact. Leaves of sugar beet may be infected by several diseases, such as rust (Uromyces betae), powdery mildew (Erysiphe betae) and other leaf spot diseases (Cercospora beticola and Ramularia beticola). In order to obtain best classification results we apply conditional random fields. In contrast to pixel based classifiers we are able to model the local context and contrary to object centred classifiers we simultaneously segment and classify the image. In a first investigation we analyse multispectral images of single leaves taken in a lab under well controlled illumination conditions. The photographed sugar beet leaves are healthy or either infected with the leaf spot pathogen Cercospora beticola or with the rust fungus Uromyces betae. We compare the classification methods pixelwise maximum posterior classification (MAP), objectwise MAP as soon as global MAP and global maximum posterior marginal classification using the spatial context within a conditional random field model.

    @InProceedings{bauer2009investigation,
    author     = {Bauer, Sabine Daniela and Kor{\vc}, Filip and F\"orstner, Wolfgang},
    title      = {Investigation into the classification of diseases of sugar beet leaves using multispectral images},
    booktitle  = {Precision Agriculture 2009},
    address    = {Wageningen},
    pages      = {229--238},
    year       = {2009},
    abstract   = {This paper reports on methods for the automatic detection and classification of leaf diseases based on high resolution multispectral images. Leaf diseases are economically important as they could cause a yield loss. Early and reliable detection of leaf diseases therefore is of utmost practical relevance - especially in the context of precision agriculture for localized treatment with fungicides. Our interest is the analysis of sugar beet due to their economical impact. Leaves of sugar beet may be infected by several diseases, such as rust (Uromyces betae), powdery mildew (Erysiphe betae) and other leaf spot diseases (Cercospora beticola and Ramularia beticola). In order to obtain best classification results we apply conditional random fields. In contrast to pixel based classifiers we are able to model the local context and contrary to object centred classifiers we simultaneously segment and classify the image. In a first investigation we analyse multispectral images of single leaves taken in a lab under well controlled illumination conditions. The photographed sugar beet leaves are healthy or either infected with the leaf spot pathogen Cercospora beticola or with the rust fungus Uromyces betae. We compare the classification methods pixelwise maximum posterior classification (MAP), objectwise MAP as soon as global MAP and global maximum posterior marginal classification using the spatial context within a conditional random field model.},
    city       = {Bonn},
    proceeding = {Precision Agriculture},
    url        = {https://www.ipb.uni-bonn.de/pdfs/Bauer2009Investigation.pdf},
    }

  • D. Bender, “3D-Rekonstruktion von Blatträndern,” Diploma Thesis, 2009.
    [BibTeX]

    \textbf{Einleitung} Der Anbau von Pflanzen in der Landwirtschaft ist durch eine zunehmende Automatisierung geprägt. Unter anderem werden hierbei Verfahren der Bildverarbeitung eingesetzt, welche zum Beispiel eine Beobachtung von Wachstum, Krankheiten oder Reifegrad der Pflanze sowie die Erkennung von Unkraut ermöglichen. Ausgehend von den Ergebnissen kann eine optimierte Produktion vollzogen und infolgedessen der Ertrag erhöht werden. Bereits an diesen Einsatzgebieten lässt sich erkennen, warum ein großes Interesse an der Verwendung von Bildverarbeitungsverfahren beim Pflanzenanbau besteht. \textbf{1.1 Aufgabenstellung} Ziel dieser Diplomarbeit ist es, eine Anwendung zu entwickeln, welche die automatische 3-D-Rekonstruktion von Blatträndern ermöglicht. Dazu wird, aufbauend auf die 2-D-Konturen mehrerer Aufnahmen eines Blattes, ein Energieminimierungsansatz entwickelt, durch den die optimale 3-D-Kontur berechnet werden kann. Dieses Verfahren wird mit realen Aufnahmen von Rübenblättern getestet, wodurch jedoch nur ein visueller Eindruck über die Qualität der Ergebnisse gewonnen werden kann. Um fundierte Aussagen über die Qualität der Ergebnisse treffen zu können, soll im Anschluss ein Verfahren zur Erstellung von synthetischen Szenen erarbeitet und mit diesen eine statistische Auswertung vollzogen werden. \textbf{1.2 Motivation} Als Silhouette wird der Umriss eines abgebildeten Körpers beschrieben. Sie ist in den meisten Aufnahmen leicht zu extrahieren und häufig der stärkste Hinweis für das abgebildete 3-D-Modell. Aus diesem Grund wird in verschiedenen Verfahren zur vollständigen 3-D-Rekonstruktion die Projektion des 3-D-Modells auf die Silhouetten der Aufnahmen als Kriterium verwendet [PZF05]. Bei Abbildungen von Blättern stimmt für Aufnahmen ohne Scheinkonturen die Silhouette mit der jeweiligen Kontur des Blattes überein. Dies ermöglicht eine Rekonstruktion des Blattrandes durch den in der vorliegenden Arbeit beschriebenen Algorithmus.
Ausgehend von dieser Kurve im 3-D-Raum können bereits Aussagen über das Wachstum einer Pflanze getroffen oder bestimmte Klassifizierungen vorgenommen werden. Des Weiteren kann basierend auf der berechneten 3-D-Kontur eine vollständige 3-D-Rekonstruktion vollzogen werden. Insbesondere bei Blättern sind hierbei der Einfluss der Beleuchtung und hierdurch auftretende Spiegelungen innerhalb des Blattes zu beachten, welche die komplette 3-D-Rekonstruktion erheblich erschweren. Möglicherweise kann die Qualität einer kompletten 3-D-Rekonstruktion durch die Übergabe der bekannten 3-D-Kontur verbessert werden. \textbf{1.3 Verwandte Arbeiten} Bei der Rekonstruktion von 3-D-Kurven durch ihre Abbildungen in mehrere Bilder handelt es sich um ein Problem, welches bis zum aktuellen Zeitpunkt noch nicht umfassend erforscht worden ist. Jedoch sind vereinzelt Arbeiten zu finden, welche die Fragestellung bearbeiten. Von diesen werden im Folgenden zwei Arbeiten kurz beschrieben, deren Hauptaugenmerk ebenfalls auf der 3-D-Rekonstruktion von Blatträndern liegt: In [ZWZY08] wird ein Verfahren zur 3-D-Rekonstruktion von Maispflanzen vorgestellt, wobei die Rekonstruktion eines Blattes (Abbildung 1.1) auf zwei Aufnahmen basiert. In diesen werden mithilfe des Canny-Algorithmus [Can86] Kanten extrahiert, aus welchen eine automatische Auswahl getroffen wird. Die anschließende Zuordnung homologer Kanten wird jedoch manuell vollzogen. Es folgen die 3-D-Rekonstruktionen des Blattrandes und der in einer Maispflanze zentral verlaufenden Blattader durch ein Schneiden der Kurven im Raum. Anschließend wird in der Arbeit eine Oberfläche ausgehend von den gefundenen 3-D-Konturen triangliert. Dies ist möglich, da Blätter von Maispflanzen sehr schmal sind und daher die rekonstruierten 3-D-Konturen nahe beieinander liegen. In [Nie04] wird die 3-D-Rekonstruktion von Blättern junger Maispflanzen mit NURBS [PT96] vollzogen.
Dabei werden zunächst die Konturen der Blätter manuell gekennzeichnet, um anschließend als Eingabe für die Konstruktion des 3-D-Modells zu dienen. Die theoretische Grundlage ist ein Verfahren, das für eine spezielle Konfiguration von drei Kameras eine 3-D-NURBS-Kurve eines freigeformten, linienähnlichen Objektes konstruiert [DXP+03]. Zunächst wird dazu die Abbildung des Objektes in den jeweiligen Bildern in Form von 2-D-NURBS-Kurven approximiert. Sind diese in allen Bildern durch eine gleiche Anzahl von Kontrollpunkten und einen übereinstimmenden Knotenvektor dargestellt, so können die Kontrollpunkte im 3-D-Raum rekonstruiert werden und führen zur gesuchten Rekonstruktion durch eine 3-D-NURBS-Kurve. \textbf{1.4 Aufbau der Arbeit} Zu Beginn wird in Kapitel 2 die Vorverarbeitung der Eingabebilder beschrieben. In diesen wird zunächst mit einem Graph-Cut-Verfahren das Blatt segmentiert und anschließend seine Kontur extrahiert. Es folgt die Berechnung einer Distanztransformation des Konturbildes, wodurch für jeden Bildpunkt der Abstand zur Kontur angegeben wird. In Kapitel 3 wird der in dieser Arbeit vorgestellte Algorithmus zur 3-D-Rekonstruktion des Blattrandes beschrieben. Anschließend werden in Kapitel 4 die Erstellung einer synthetischen Szene zur Bewertung der Ergebnisse und die verwendeten Mittel zur statistischen Auswertung der Fehler dargestellt. In Kapitel 5 werden für reale und synthetische Bilder die Ergebnisse durchgeführter Experimente präsentiert und erörtert. Zum Abschluss der Arbeit folgen in Kapitel 6 eine Zusammenfassung und ein Ausblick auf mögliche Weiterführungen und Alternativen des vorgestellten Verfahrens.

    @MastersThesis{bender20093d,
    title = {3D-Rekonstruktion von Blattr\"andern},
    author = {Bender, Daniel},
    school = {University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Daniel Cremers},
    type = {Diploma Thesis},
    abstract = {\textbf{Einleitung} Der Anbau von Pflanzen in der Landwirtschaft ist durch eine zunehmende Automatisierung gepr\"agt. Unter anderem werden hierbei Verfahren der Bildverarbeitung eingesetzt, welche zum Beispiel eine Beobachtung von Wachstum, Krankheiten oder Reifegrad der Pflanze sowie die Erkennung von Unkraut erm\"oglichen. Ausgehend von den Ergebnissen kann eine optimierte Produktion vollzogen und infolgedessen der Ertrag erh\"oht werden. Bereits an diesen Einsatzgebieten l\"asst sich erkennen, warum ein gro{\ss}es Interesse an der Verwendung von Bildverarbeitungsverfahren beim Pflanzenanbau besteht. \textbf{1.1 Aufgabenstellung} Ziel dieser Diplomarbeit ist es, eine Anwendung zu entwickeln, welche die automatische 3-D-Rekonstruktion von Blattr\"andern erm\"oglicht. Dazu wird, aufbauend auf die 2-D-Konturen mehrerer Aufnahmen eines Blattes, ein Energieminimierungsansatz entwickelt, durch den die optimale 3-D-Kontur berechnet werden kann. Dieses Verfahren wird mit realen Aufnahmen von R\"ubenbl\"attern getestet, wodurch jedoch nur ein visueller Eindruck \"uber die Qualit\"at der Ergebnisse gewonnen werden kann. Um fundierte Aussagen \"uber die Qualit\"at der Ergebnisse treffen zu k\"onnen, soll im Anschluss ein Verfahren zur Erstellung von synthetischen Szenen erarbeitet und mit diesen eine statistische Auswertung vollzogen werden. \textbf{1.2 Motivation} Als Silhouette wird der Umriss eines abgebildeten K\"orpers beschrieben. Sie ist in den meisten Aufnahmen leicht zu extrahieren und h\"aufig der st\"arkste Hinweis f\"ur das abgebildete 3-D-Modell. Aus diesem Grund wird in verschiedenen Verfahren zur vollst\"andigen 3-D-Rekonstruktion die Projektion des 3-D-Modells auf die Silhouetten der Aufnahmen als Kriterium verwendet [PZF05]. Bei Abbildungen von Bl\"attern stimmt f\"ur Aufnahmen ohne Scheinkonturen die Silhouette mit der jeweiligen Kontur des Blattes \"uberein. Dies erm\"oglicht eine Rekonstruktion des Blattrandes durch den in der vorliegenden Arbeit beschriebenen Algorithmus. Ausgehend von dieser Kurve im 3-D-Raum k\"onnen bereits Aussagen \"uber das Wachstum einer Pflanze getroffen oder bestimmte Klassifizierungen vorgenommen werden. Des Weiteren kann basierend auf der berechneten 3-D-Kontur eine vollst\"andige 3-D-Rekonstruktion vollzogen werden. Insbesondere bei Bl\"attern sind hierbei der Einfluss der Beleuchtung und hierdurch auftretende Spiegelungen innerhalb des Blattes zu beachten, welche die komplette 3-D-Rekonstruktion erheblich erschweren. M\"oglicherweise kann die Qualit\"at einer kompletten 3-D-Rekonstruktion durch die \"Ubergabe der bekannten 3-D-Kontur verbessert werden. \textbf{1.3 Verwandte Arbeiten} Bei der Rekonstruktion von 3-D-Kurven durch ihre Abbildungen in mehrere Bilder handelt es sich um ein Problem, welches bis zum aktuellen Zeitpunkt noch nicht umfassend erforscht worden ist. Jedoch sind vereinzelt Arbeiten zu finden, welche die Fragestellung bearbeiten. Von diesen werden im Folgenden zwei Arbeiten kurz beschrieben, deren Hauptaugenmerk ebenfalls auf der 3-D-Rekonstruktion von Blattr\"andern liegt: In [ZWZY08] wird ein Verfahren zur 3-D-Rekonstruktion von Maispflanzen vorgestellt, wobei die Rekonstruktion eines Blattes (Abbildung 1.1) auf zwei Aufnahmen basiert. In diesen werden mithilfe des Canny-Algorithmus [Can86] Kanten extrahiert, aus welchen eine automatische Auswahl getroffen wird. Die anschlie{\ss}ende Zuordnung homologer Kanten wird jedoch manuell vollzogen. Es folgen die 3-D-Rekonstruktionen des Blattrandes und der in einer Maispflanze zentral verlaufenden Blattader durch ein Schneiden der Kurven im Raum. Anschlie{\ss}end wird in der Arbeit eine Oberfl\"ache ausgehend von den gefundenen 3-D-Konturen triangliert. Dies ist m\"oglich, da Bl\"atter von Maispflanzen sehr schmal sind und daher die rekonstruierten 3-D-Konturen nahe beieinander liegen. In [Nie04] wird die 3-D-Rekonstruktion von Bl\"attern junger Maispflanzen mit NURBS [PT96] vollzogen. Dabei werden zun\"achst die Konturen der Bl\"atter manuell gekennzeichnet, um anschlie{\ss}end als Eingabe f\"ur die Konstruktion des 3-D-Modells zu dienen. Die theoretische Grundlage ist ein Verfahren, das f\"ur eine spezielle Konfiguration von drei Kameras eine 3-D-NURBS-Kurve eines freigeformten, linien\"ahnlichen Objektes konstruiert [DXP+03]. Zun\"achst wird dazu die Abbildung des Objektes in den jeweiligen Bildern in Form von 2-D-NURBS-Kurven approximiert. Sind diese in allen Bildern durch eine gleiche Anzahl von Kontrollpunkten und einen \"ubereinstimmenden Knotenvektor dargestellt, so k\"onnen die Kontrollpunkte im 3-D-Raum rekonstruiert werden und f\"uhren zur gesuchten Rekonstruktion durch eine 3-D-NURBS-Kurve. \textbf{1.4 Aufbau der Arbeit} Zu Beginn wird in Kapitel 2 die Vorverarbeitung der Eingabebilder beschrieben. In diesen wird zun\"achst mit einem Graph-Cut-Verfahren das Blatt segmentiert und anschlie{\ss}end seine Kontur extrahiert. Es folgt die Berechnung einer Distanztransformation des Konturbildes, wodurch f\"ur jeden Bildpunkt der Abstand zur Kontur angegeben wird. In Kapitel 3 wird der in dieser Arbeit vorgestellte Algorithmus zur 3-D-Rekonstruktion des Blattrandes beschrieben. Anschlie{\ss}end werden in Kapitel 4 die Erstellung einer synthetischen Szene zur Bewertung der Ergebnisse und die verwendeten Mittel zur statistischen Auswertung der Fehler dargestellt. In Kapitel 5 werden f\"ur reale und synthetische Bilder die Ergebnisse durchgef\"uhrter Experimente pr\"asentiert und er\"ortert. Zum Abschluss der Arbeit folgen in Kapitel 6 eine Zusammenfassung und ein Ausblick auf m\"ogliche Weiterf\"uhrungen und Alternativen des vorgestellten Verfahrens.},
    }

  • M. Bennewitz, C. Stachniss, S. Behnke, and W. Burgard, “Utilizing Reflection Properties of Surfaces to Improve Mobile Robot Localization,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @InProceedings{bennewitz2009,
    title = {Utilizing Reflection Properties of Surfaces to Improve Mobile Robot Localization},
    author = {Bennewitz, M. and Stachniss, C. and Behnke, S. and Burgard, W.},
    booktitle = icra,
    year = {2009},
    address = {Kobe, Japan},
    timestamp = {2014.04.24},
    }

  • W. Burgard, C. Stachniss, G. Grisetti, B. Steder, R. Kümmerle, C. Dornhege, M. Ruhnke, A. Kleiner, and J. D. Tardós, “A Comparison of SLAM Algorithms Based on a Graph of Relations,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{burgard2009,
    title = {A Comparison of {SLAM} Algorithms Based on a Graph of Relations},
    author = {W. Burgard and C. Stachniss and G. Grisetti and B. Steder and R. K\"ummerle and C. Dornhege and M. Ruhnke and A. Kleiner and J.D. Tard\'os},
    booktitle = iros,
    year = {2009},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/burgard09iros.pdf},
    }

  • T. Dickscheid and W. Förstner, “Evaluating the Suitability of Feature Detectors for Automatic Image Orientation Systems,” in 7th International Conf. on Computer Vision Systems (ICVS’09), Liege, Belgium, 2009, p. 305–314. doi:10.1007/978-3-642-04667-4_31
    [BibTeX] [PDF]

    We investigate the suitability of different local feature detectors for the task of automatic image orientation under different scene texturings. Building on an existing system for image orientation, we vary the applied operators while keeping the strategy fixed, and evaluate the results. An emphasis is put on the effect of combining detectors for calibrating difficult datasets. Besides some of the most popular scale and affine invariant detectors available, we include two recently proposed operators in the setup: A scale invariant junction detector and a scale invariant detector based on the local entropy of image patches. After describing the system, we present a detailed performance analysis of the different operators on a number of image datasets. We both analyze ground-truth-deviations and results of a final bundle adjustment, including observations, 3D object points and camera poses. The paper concludes with hints on the suitability of the different combinations of detectors, and an assessment of the potential of such automatic orientation procedures.

    @InProceedings{dickscheid2009evaluating,
    title = {Evaluating the Suitability of Feature Detectors for Automatic Image Orientation Systems},
    author = {Dickscheid, Timo and F\"orstner, Wolfgang},
    booktitle = {7th International Conf. on Computer Vision Systems (ICVS'09)},
    year = {2009},
    address = {Liege, Belgium},
    editor = {Mario Fritz and Bernt Schiele and Justus H. Piater},
    pages = {305--314},
    publisher = {Springer},
    series = {Lecture Notes in Computer Science},
    volume = {5815},
    abstract = {We investigate the suitability of different local feature detectors for the task of automatic image orientation under different scene texturings. Building on an existing system for image orientation, we vary the applied operators while keeping the strategy fixed, and evaluate the results. An emphasis is put on the effect of combining detectors for calibrating difficult datasets. Besides some of the most popular scale and affine invariant detectors available, we include two recently proposed operators in the setup: A scale invariant junction detector and a scale invariant detector based on the local entropy of image patches. After describing the system, we present a detailed performance analysis of the different operators on a number of image datasets. We both analyze ground-truth-deviations and results of a final bundle adjustment, including observations, 3D object points and camera poses. The paper concludes with hints on the suitability of the different combinations of detectors, and an assessment of the potential of such automatic orientation procedures.},
    doi = {10.1007/978-3-642-04667-4_31},
    isbn = {978-3-642-04666-7},
    location = {Heidelberg},
    url = {https://www.ipb.uni-bonn.de/pdfs/Dickscheid2009Evaluating.pdf},
    }

  • M. Drauschke, “Documentation: Segmentation and Graph Construction of HMRF,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-03, 2009.
    [BibTeX] [PDF]

    This is a technical report for presenting a documentation on the segmentation and the graph construction of a Hierarchical Markov random field (HMRF). The segmentation is based on multiscale analysis and watershed regions as presented in [Drauschke et al., 2006]. The region’s development is tracked over the scales, which defines a region hierarchy graph. This graph is used to improve the segmentation by reforming the regions geometrically more precisely. This work is taken from [Drauschke, 2009]. Furthermore, we determine a region adjacency graph from each image partition of all scales. The detected image regions, their adjacent regions and their hierarchical neighbors are saved into an xml-file for a convenient output.

    @techreport{drauschke2009documentation,
      title       = {Documentation: Segmentation and Graph Construction of HMRF},
      author      = {Drauschke, Martin},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2009},
      number      = {TR-IGG-P-2009-03},
      abstract    = {This is a technical report for presenting a documentation on the segmentation and the graph construction of a Hierarchical Markov random field (HMRF). The segmentation is based on multiscale analysis and watershed regions as presented in [Drauschke et al., 2006]. The region's development is tracked over the scales, which defines a region hierarchy graph. This graph is used to improve the segmentation by reforming the regions geometrically more precisely. This work is taken from [Drauschke, 2009]. Furthermore, we determine a region adjacency graph from each image partition of all scales. The detected image regions, their adjacent regions and their hierarchical neighbors are saved into an xml-file for a convenient output.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Documentation.pdf},
    }

  • M. Drauschke, “An Irregular Pyramid for Multi-scale Analysis of Objects and their Parts,” in 7th IAPR-TC-15 Workshop on Graph-based Representations in Pattern Recognition, Venice, Italy, 2009, p. 293–303. doi:10.1007/978-3-642-02124-4_30
    [BibTeX] [PDF]

    We present an irregular image pyramid which is derived from multi-scale analysis of segmented watershed regions. Our framework is based on the development of regions in the Gaussian scale-space, which is represented by a region hierarchy graph. Using this structure, we are able to determine geometrically precise borders of our segmented regions using a region focusing. In order to handle the complexity, we select only stable regions and regions resulting from a merging event, which enables us to keep the hierarchical structure of the regions. Using this framework, we are able to detect objects of various scales in an image. Finally, the hierarchical structure is used for describing these detected regions as aggregations of their parts. We investigate the usefulness of the regions for interpreting images showing building facades with parts like windows, balconies or entrances.

    @inproceedings{drauschke2009irregular,
      author    = {Drauschke, Martin},
      title     = {An Irregular Pyramid for Multi-scale Analysis of Objects and their Parts},
      booktitle = {7th IAPR-TC-15 Workshop on Graph-based Representations in Pattern Recognition},
      year      = {2009},
      address   = {Venice, Italy},
      pages     = {293--303},
      doi       = {10.1007/978-3-642-02124-4_30},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Irregular.pdf},
      city      = {Bonn},
      abstract  = {We present an irregular image pyramid which is derived from multi-scale analysis of segmented watershed regions. Our framework is based on the development of regions in the Gaussian scale-space, which is represented by a region hierarchy graph. Using this structure, we are able to determine geometrically precise borders of our segmented regions using a region focusing. In order to handle the complexity, we select only stable regions and regions resulting from a merging event, which enables us to keep the hierarchical structure of the regions. Using this framework, we are able to detect objects of various scales in an image. Finally, the hierarchical structure is used for describing these detected regions as aggregations of their parts. We investigate the usefulness of the regions for interpreting images showing building facades with parts like windows, balconies or entrances.},
    }

  • M. Drauschke, W. Förstner, and A. Brunn, “Multidodging: Ein effizienter Algorithmus zur automatischen Verbesserung von digitalisierten Luftbildern,” in Publikationen der DGPF, Band 18: Zukunft mit Tradition, Jena, 2009, p. 61–68.
    [BibTeX] [PDF]

    Wir haben ein effizientes, automatisches Verfahren zur Verbesserung von digitalisierten Luftbildern entwickelt. Das Verfahren MULTIDODGING dient im Kontext der visuellen Aufbereitung von historischen Aufnahmen aus dem 2. Weltkrieg. Bei der Bildverbesserung mittels MULTIDODGING wird das eingescannte Bild zunächst in sich nicht überlappende rechteckige Bildausschnitte unterteilt. In jedem Bildausschnitt wird eine Histogrammverebnung durchgeführt, die im Allgemeinen zu einer Verstärkung des Kontrasts führt. Durch die regionale Veränderung des Bildes entstehen sichtbare Grenzen zwischen den Bildausschnitten, die durch eine Interpolation entfernt werden. In der Anwendung des bisherigen Verfahrens hat sich gezeigt, dass der Kontrast in vielen lokalen Stellen zu stark ist. Deshalb kann zum Abschluss die Spannweite der Grauwerte zusätzlich reduziert werden, wobei diese Kontrastanpassung regional aus den Gradienten im Bildausschnitt berechnet wird. Dieser Beitrag beschreibt und analysiert das Verfahren im Detail.

    @inproceedings{drauschke2009multidodging,
      author    = {Drauschke, Martin and F\"orstner, Wolfgang and Brunn, Ansgar},
      title     = {Multidodging: Ein effizienter Algorithmus zur automatischen Verbesserung von digitalisierten Luftbildern},
      booktitle = {Publikationen der DGPF, Band 18: Zukunft mit Tradition},
      year      = {2009},
      address   = {Jena},
      pages     = {61--68},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Multidodging.pdf},
      city      = {Bonn},
      abstract  = {Wir haben ein effizientes, automatisches Verfahren zur Verbesserung von digitalisierten Luftbildern entwickelt. Das Verfahren MULTIDODGING dient im Kontext der visuellen Aufbereitung von historischen Aufnahmen aus dem 2. Weltkrieg. Bei der Bildverbesserung mittels MULTIDODGING wird das eingescannte Bild zun\"achst in sich nicht \"uberlappende rechteckige Bildausschnitte unterteilt. In jedem Bildausschnitt wird eine Histogrammverebnung durchgef\"uhrt, die im Allgemeinen zu einer Verst\"arkung des Kontrasts f\"uhrt. Durch die regionale Ver\"anderung des Bildes entstehen sichtbare Grenzen zwischen den Bildausschnitten, die durch eine Interpolation entfernt werden. In der Anwendung des bisherigen Verfahrens hat sich gezeigt, dass der Kontrast in vielen lokalen Stellen zu stark ist. Deshalb kann zum Abschluss die Spannweite der Grauwerte zus\"atzlich reduziert werden, wobei diese Kontrastanpassung regional aus den Gradienten im Bildausschnitt berechnet wird. Dieser Beitrag beschreibt und analysiert das Verfahren im Detail.},
    }

  • M. Drauschke, R. Roscher, T. Läbe, and W. Förstner, “Improving Image Segmentation using Multiple View Analysis,” in Object Extraction for 3D City Models, Road Databases and Traffic Monitoring – Concepts, Algorithms and Evaluation (CMRT09), 2009, pp. 211–216.
    [BibTeX] [PDF]

    In our contribution, we improve image segmentation by integrating depth information from multi-view analysis. We assume the object surface in each region can be represented by a low order polynomial, and estimate the best fitting parameters of a plane using those points of the point cloud, which are mapped to the specific region. We can merge adjacent image regions, which cannot be distinguished geometrically. We demonstrate the approach for finding spatially planar regions on aerial images. Furthermore, we discuss the possibilities of extending of our approach towards segmenting terrestrial facade images.

    @inproceedings{drauschke2009improving,
      title     = {Improving Image Segmentation using Multiple View Analysis},
      author    = {Drauschke, Martin and Roscher, Ribana and L\"abe, Thomas and F\"orstner, Wolfgang},
      booktitle = {Object Extraction for 3D City Models, Road Databases and Traffic Monitoring - Concepts, Algorithms and Evaluation (CMRT09)},
      year      = {2009},
      pages     = {211--216},
      abstract  = {In our contribution, we improve image segmentation by integrating depth information from multi-view analysis. We assume the object surface in each region can be represented by a low order polynomial, and estimate the best fitting parameters of a plane using those points of the point cloud, which are mapped to the specific region. We can merge adjacent image regions, which cannot be distinguished geometrically. We demonstrate the approach for finding spatially planar regions on aerial images. Furthermore, we discuss the possibilities of extending of our approach towards segmenting terrestrial facade images.},
      city      = {Paris},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2009Improving.pdf},
    }

  • F. Endres, J. Hess, N. Franklin, C. Plagemann, C. Stachniss, and W. Burgard, “Estimating Range Information from Monocular Vision,” in Workshop Regression in Robotics – Approaches and Applications at Robotics: Science and Systems (RSS), Seattle, WA, USA, 2009.
    [BibTeX]
    [none]
    @inproceedings{endres2009,
      author    = {Endres, F. and Hess, J. and Franklin, N. and Plagemann, C. and Stachniss, C. and Burgard, W.},
      title     = {Estimating Range Information from Monocular Vision},
      booktitle = {Workshop Regression in Robotics - Approaches and Applications at Robotics: Science and Systems (RSS)},
      year      = {2009},
      address   = {Seattle, WA, USA},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • F. Endres, C. Plagemann, C. Stachniss, and W. Burgard, “Scene Analysis using Latent Dirichlet Allocation,” in Proc. of Robotics: Science and Systems (RSS), Seattle, WA, USA, 2009.
    [BibTeX] [PDF]
    [none]
    @inproceedings{endres2009a,
      title     = {Scene Analysis using Latent Dirichlet Allocation},
      author    = {Endres, F. and Plagemann, C. and Stachniss, C. and Burgard, W.},
      booktitle = rss,
      year      = {2009},
      address   = {Seattle, WA, USA},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/endres09rss-draft.pdf},
    }

  • C. Eppner, J. Sturm, M. Bennewitz, C. Stachniss, and W. Burgard, “Imitation Learning with Generalized Task Descriptions,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @inproceedings{eppner2009,
      title     = {Imitation Learning with Generalized Task Descriptions},
      author    = {Eppner, C. and Sturm, J. and Bennewitz, M. and Stachniss, C. and Burgard, W.},
      booktitle = icra,
      year      = {2009},
      address   = {Kobe, Japan},
      timestamp = {2014.04.24},
    }

  • W. Förstner, “Computer Vision and Remote Sensing – Lessons Learned,” in Photogrammetric Week 2009, Heidelberg, 2009, p. 241–249.
    [BibTeX] [PDF]

    Photogrammetry has significantly been influenced by its two neigbouring fields, namely Computer Vision and Remote Sensing. Today, Photogrammetry has been become a part of Remote Sensing. The paper reflects its growing relations with Computer Vision, based on a more than 25 years experience of the author with the fascinating field between cognitive, natural and engineering science, which stimulated his own research and transferred him into a wanderer between two worlds.

    @inproceedings{forstner2009computer,
      author    = {F\"orstner, Wolfgang},
      title     = {Computer Vision and Remote Sensing - Lessons Learned},
      booktitle = {Photogrammetric Week 2009},
      year      = {2009},
      address   = {Heidelberg},
      pages     = {241--249},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Computer.pdf},
      note      = {Slides are available at \url{https://www.ipb.uni-bonn.de/pdfs/Forstner2009Computer_slides.pdf} },
      city      = {Stuttgart},
      abstract  = {Photogrammetry has significantly been influenced by its two neigbouring fields, namely Computer Vision and Remote Sensing. Today, Photogrammetry has been become a part of Remote Sensing. The paper reflects its growing relations with Computer Vision, based on a more than 25 years experience of the author with the fascinating field between cognitive, natural and engineering science, which stimulated his own research and transferred him into a wanderer between two worlds.},
    }

  • W. Förstner, “Mustererkennung in der Fernerkundung,” in Publikationen der DGPF, Band 18: Zukunft mit Tradition, Jena, 2009, p. 129–136.
    [BibTeX] [PDF]

    Der Beitrag beleuchtet die Forschung in Photogrammetrie und Fernerkundung unter dem Blickwinkel der Methoden, die für die Lösung der zentrale Aufgabe beider Fachgebiete, der Bildinterpretation, erforderlich sind, sowohl zur Integration beider Gebiete, wie zu einer effizienten Gestaltung gemeinsamerer Forschung. Ingredienzien für erfolgreiche Forschung in diesem Bereich sind Fokussierung auf Themen, die in ca. eine Dekade bearbeitet werden können, enge Kooperation mit den fachlich angrenzenden Disziplinen – der Mustererkennung und dem maschinellen Lernen – , kompetetives Benchmarking, Softwareaustausch und Integration der Forschungsthemen in die Ausbildung. Der Beitrag skizziert ein Forschungsprogamm mit den Themen ‘Mustererkennung in der Fernerkundung’ und Interpretation von LIDARDaten das, interdisziplinär ausgerichtet, die Photogrammetrie mit den unmittelbaren Nachbardisziplinen zunehmend verweben könnte, und – nach Ansicht des Autors – zur Erhaltung der Innovationskraft auch dringend erforderlich ist.

    @inproceedings{forstner2009mustererkennung,
      author    = {F\"orstner, Wolfgang},
      title     = {Mustererkennung in der Fernerkundung},
      booktitle = {Publikationen der DGPF, Band 18: Zukunft mit Tradition},
      year      = {2009},
      address   = {Jena},
      pages     = {129--136},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Mustererkennung.pdf},
      city      = {Bonn},
      abstract  = {Der Beitrag beleuchtet die Forschung in Photogrammetrie und Fernerkundung unter dem Blickwinkel der Methoden, die f\"ur die L\"osung der zentrale Aufgabe beider Fachgebiete, der Bildinterpretation, erforderlich sind, sowohl zur Integration beider Gebiete, wie zu einer effizienten Gestaltung gemeinsamerer Forschung. Ingredienzien f\"ur erfolgreiche Forschung in diesem Bereich sind Fokussierung auf Themen, die in ca. eine Dekade bearbeitet werden k\"onnen, enge Kooperation mit den fachlich angrenzenden Disziplinen - der Mustererkennung und dem maschinellen Lernen - , kompetetives Benchmarking, Softwareaustausch und Integration der Forschungsthemen in die Ausbildung. Der Beitrag skizziert ein Forschungsprogamm mit den Themen 'Mustererkennung in der Fernerkundung' und Interpretation von LIDARDaten das, interdisziplin\"ar ausgerichtet, die Photogrammetrie mit den unmittelbaren Nachbardisziplinen zunehmend verweben k\"onnte, und - nach Ansicht des Autors - zur Erhaltung der Innovationskraft auch dringend erforderlich ist.},
    }

  • W. Förstner, T. Dickscheid, and F. Schindler, “On the Completeness of Coding with Image Features,” in 20th British Machine Vision Conf., London, UK, 2009. doi:10.5244/C.23.1
    [BibTeX] [PDF]

    We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density pH(x) based on local image statistics, and a feature coding density pc(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between pH(x) and pc(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.

    @inproceedings{forstner2009completeness,
      author    = {F\"orstner, Wolfgang and Dickscheid, Timo and Schindler, Falko},
      title     = {On the Completeness of Coding with Image Features},
      booktitle = {20th British Machine Vision Conf.},
      year      = {2009},
      address   = {London, UK},
      doi       = {10.5244/C.23.1},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Completeness.pdf},
      abstract  = {We present a scheme for measuring completeness of local feature extraction in terms of image coding. Completeness is here considered as good coverage of relevant image information by the features. As each feature requires a certain number of bits which are representative for a certain subregion of the image, we interpret the coverage as a sparse coding scheme. The measure is therefore based on a comparison of two densities over the image domain: An entropy density pH(x) based on local image statistics, and a feature coding density pc(x) which is directly computed from each particular set of local features. Motivated by the coding scheme in JPEG, the entropy distribution is derived from the power spectrum of local patches around each pixel position in a statistically sound manner. As the total number of bits for coding the image and for representing it with local features may be different, we measure incompleteness by the Hellinger distance between pH(x) and pc(x). We will derive a procedure for measuring incompleteness of possibly mixed sets of local features and show results on standard datasets using some of the most popular region and keypoint detectors, including Lowe, MSER and the recently published SFOP detectors. Furthermore, we will draw some interesting conclusions about the complementarity of detectors.},
    }

  • W. Förstner, T. Dickscheid, and F. Schindler, “Detecting Interpretable and Accurate Scale-Invariant Keypoints,” in 12th IEEE International Conf. on Computer Vision (ICCV’09), Kyoto, Japan, 2009, p. 2256–2263. doi:10.1109/ICCV.2009.5459458
    [BibTeX] [PDF]

    This paper presents a novel method for detecting scale invariant keypoints. It fills a gap in the set of available methods, as it proposes a scale-selection mechanism for junction-type features. The method is a scale-space extension of the detector proposed by Förstner (1994) and uses the general spiral feature model of Bigün (1990) to unify different types of features within the same framework. By locally optimising the consistency of image regions with respect to the spiral model, we are able to detect and classify image structures with complementary properties over scalespace, especially star and circular shapes as interpretable and identifiable subclasses. Our motivation comes from calibrating images of structured scenes with poor texture, where blob detectors alone cannot find sufficiently many keypoints, while existing corner detectors fail due to the lack of scale invariance. The procedure can be controlled by semantically clear parameters. One obtains a set of keypoints with position, scale, type and consistency measure. We characterise the detector and show results on common benchmarks. It competes in repeatability with the Lowe detector, but finds more stable keypoints in poorly textured areas, and shows comparable or higher accuracy than other recent detectors. This makes it useful for both object recognition and camera calibration.

    @inproceedings{forstner2009detecting,
      author    = {F\"orstner, Wolfgang and Dickscheid, Timo and Schindler, Falko},
      title     = {Detecting Interpretable and Accurate Scale-Invariant Keypoints},
      booktitle = {12th IEEE International Conf. on Computer Vision (ICCV'09)},
      year      = {2009},
      address   = {Kyoto, Japan},
      pages     = {2256--2263},
      doi       = {10.1109/ICCV.2009.5459458},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner2009Detectinga.pdf},
      abstract  = {This paper presents a novel method for detecting scale invariant keypoints. It fills a gap in the set of available methods, as it proposes a scale-selection mechanism for junction-type features. The method is a scale-space extension of the detector proposed by F\"orstner (1994) and uses the general spiral feature model of Big\"un (1990) to unify different types of features within the same framework. By locally optimising the consistency of image regions with respect to the spiral model, we are able to detect and classify image structures with complementary properties over scalespace, especially star and circular shapes as interpretable and identifiable subclasses. Our motivation comes from calibrating images of structured scenes with poor texture, where blob detectors alone cannot find sufficiently many keypoints, while existing corner detectors fail due to the lack of scale invariance. The procedure can be controlled by semantically clear parameters. One obtains a set of keypoints with position, scale, type and consistency measure. We characterise the detector and show results on common benchmarks. It competes in repeatability with the Lowe detector, but finds more stable keypoints in poorly textured areas, and shows comparable or higher accuracy than other recent detectors. This makes it useful for both object recognition and camera calibration.},
    }

  • B. Frank, C. Stachniss, R. Schmedding, W. Burgard, and M. Teschner, “Real-world Robot Navigation amongst Deformable Obstacles,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @inproceedings{frank2009,
      author    = {B. Frank and C. Stachniss and R. Schmedding and W. Burgard and M. Teschner},
      title     = {Real-world Robot Navigation amongst Deformable Obstacles},
      booktitle = icra,
      year      = {2009},
      address   = {Kobe, Japan},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • G. Grisetti, C. Stachniss, and W. Burgard, “Non-linear Constraint Network Optimization for Efficient Map Learning,” , vol. 10, iss. 3, p. 428–439, 2009.
    [BibTeX] [PDF]
    [none]
    @article{grisetti2009,
      author    = {Grisetti, G. and Stachniss, C. and Burgard, W.},
      title     = {Non-linear Constraint Network Optimization for Efficient Map Learning},
      journal   = ieeeits,
      year      = {2009},
      volume    = {10},
      number    = {3},
      pages     = {428--439},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti09its.pdf},
    }

  • F. Korč and W. Förstner, “eTRIMS Image Database for Interpreting Images of Man-Made Scenes,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-01, 2009.
    [BibTeX] [PDF]

    We describe ground truth data that we provide to serve as a basis for evaluation and comparison of supervised learning approaches to image interpretation. The provided ground truth, the eTRIMS Image Database, is a collection of annotated images of real world street scenes. Typical objects in these images are variable in shape and appearance, in the number of its parts and appear in a variety of configurations. The domain of man-made scenes is thus well suited for evaluation and comparison of a variety of interpretation approaches, including those that employ structure models. The provided pixelwise ground truth assigns each image pixel both with a class label and an object label and offers thus ground truth annotation both on the level of pixels and regions. While we believe that such ground truth is of general interest in supervised learning, such data may be of further relevance in emerging real world applications involving automation of man-made scene interpretation.

    @techreport{korvc2009etrims,
      title       = {{eTRIMS} Image Database for Interpreting Images of Man-Made Scenes},
      author      = {Kor{\vc}, Filip and F\"orstner, Wolfgang},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2009},
      month       = apr,
      number      = {TR-IGG-P-2009-01},
      abstract    = {We describe ground truth data that we provide to serve as a basis for evaluation and comparison of supervised learning approaches to image interpretation. The provided ground truth, the eTRIMS Image Database, is a collection of annotated images of real world street scenes. Typical objects in these images are variable in shape and appearance, in the number of its parts and appear in a variety of configurations. The domain of man-made scenes is thus well suited for evaluation and comparison of a variety of interpretation approaches, including those that employ structure models. The provided pixelwise ground truth assigns each image pixel both with a class label and an object label and offers thus ground truth annotation both on the level of pixels and regions. While we believe that such ground truth is of general interest in supervised learning, such data may be of further relevance in emerging real world applications involving automation of man-made scene interpretation.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Korvc2009eTRIMS.pdf},
    }

  • R. Kuemmerle, B. Steder, C. Dornhege, M. Ruhnke, G. Grisetti, C. Stachniss, and A. Kleiner, “On measuring the accuracy of SLAM algorithms,” Autonomous Robots, vol. 27, p. 387ff, 2009.
    [BibTeX] [PDF]
    [none]
    @article{kuemmerle2009,
      title     = {On measuring the accuracy of {SLAM} algorithms},
      author    = {R. Kuemmerle and B. Steder and C. Dornhege and M. Ruhnke and G. Grisetti and C. Stachniss and A. Kleiner},
      journal   = auro,
      year      = {2009},
      volume    = {27},
      number    = {4},
      pages     = {387ff},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kuemmerle09auro.pdf},
    }

  • J. Meidow, C. Beder, and W. Förstner, “Reasoning with uncertain points, straight lines, and straight line segments in 2D,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 64, iss. 2, p. 125–139, 2009. doi:10.1016/j.isprsjprs.2008.09.013
    [BibTeX] [PDF]

    Decisions based on basic geometric entities can only be optimal, if their uncertainty is propagated through the entire reasoning chain. This concerns the construction of new entities from given ones, the testing of geometric relations between geometric entities, and the parameter estimation of geometric entities based on spatial relations which have been found to hold. Basic feature extraction procedures often provide measures of uncertainty. These uncertainties should be incorporated into the representation of geometric entities permitting statistical testing, eliminates the necessity of specifying non-interpretable thresholds and enables statistically optimal parameter estimation. Using the calculus of homogeneous coordinates the power of algebraic projective geometry can be exploited in these steps of image analysis. This review collects, discusses and evaluates the various representations of uncertain geometric entities in 2D together with their conversions. The representations are extended to achieve a consistent set of representations allowing geometric reasoning. The statistical testing of geometric relations is presented. Furthermore, a generic estimation procedure is provided for multiple uncertain geometric entities based on possibly correlated observed geometric entities and geometric constraints.

    @article{meidow2009reasoning,
      title     = {Reasoning with uncertain points, straight lines, and straight line segments in 2D},
      author    = {Meidow, Jochen and Beder, Christian and F\"orstner, Wolfgang},
      journal   = {ISPRS Journal of Photogrammetry and Remote Sensing},
      year      = {2009},
      volume    = {64},
      number    = {2},
      pages     = {125--139},
      abstract  = {Decisions based on basic geometric entities can only be optimal, if their uncertainty is propagated through the entire reasoning chain. This concerns the construction of new entities from given ones, the testing of geometric relations between geometric entities, and the parameter estimation of geometric entities based on spatial relations which have been found to hold. Basic feature extraction procedures often provide measures of uncertainty. These uncertainties should be incorporated into the representation of geometric entities permitting statistical testing, eliminates the necessity of specifying non-interpretable thresholds and enables statistically optimal parameter estimation. Using the calculus of homogeneous coordinates the power of algebraic projective geometry can be exploited in these steps of image analysis. This review collects, discusses and evaluates the various representations of uncertain geometric entities in 2D together with their conversions. The representations are extended to achieve a consistent set of representations allowing geometric reasoning. The statistical testing of geometric relations is presented. Furthermore, a generic estimation procedure is provided for multiple uncertain geometric entities based on possibly correlated observed geometric entities and geometric constraints.},
      city      = {Bonn},
      doi       = {10.1016/j.isprsjprs.2008.09.013},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Meidow2009Reasoning.pdf},
    }

  • J. Meidow, W. Förstner, and C. Beder, “Optimal Parameter Estimation with Homogeneous Entities and Arbitrary Constraints,” in Pattern Recognition (Symposium of DAGM), Jena, Germany, 2009, p. 292–301. doi:10.1007/978-3-642-03798-6_30
    [BibTeX] [PDF]

    Well known estimation techniques in computational geometry usually deal only with single geometric entities as unknown parameters and do not account for constrained observations within the estimation. The estimation model proposed in this paper is much more general, as it can handle multiple homogeneous vectors as well as multiple constraints. Furthermore, it allows the consistent handling of arbitrary covariance matrices for the observed and the estimated entities. The major novelty is the proper handling of singular observation covariance matrices made possible by additional constraints within the estimation. These properties are of special interest for instance in the calculus of algebraic projective geometry, where singular covariance matrices arise naturally from the non-minimal parameterizations of the entities. The validity of the proposed adjustment model will be demonstrated by the estimation of a fundamental matrix from synthetic data and compared to heteroscedastic regression [?], which is considered as state-of-the-art estimator for this task. As the latter is unable to simultaneously estimate multiple entities, we will also demonstrate the usefulness and the feasibility of our approach by the constrained estimation of three vanishing points from observed uncertain image line segments.

    @InProceedings{meidow2009optimal,
    title = {Optimal Parameter Estimation with Homogeneous Entities and Arbitrary Constraints},
    author = {Meidow, Jochen and F\"orstner, Wolfgang and Beder, Christian},
    booktitle = {Pattern Recognition (Symposium of DAGM)},
    year = {2009},
    address = {Jena, Germany},
    editor = {Denzler, J. and Notni, G.},
    pages = {292--301},
    publisher = {Springer},
    series = {LNCS},
    abstract = {Well known estimation techniques in computational geometry usually deal only with single geometric entities as unknown parameters and do not account for constrained observations within the estimation. The estimation model proposed in this paper is much more general, as it can handle multiple homogeneous vectors as well as multiple constraints. Furthermore, it allows the consistent handling of arbitrary covariance matrices for the observed and the estimated entities. The major novelty is the proper handling of singular observation covariance matrices made possible by additional constraints within the estimation. These properties are of special interest for instance in the calculus of algebraic projective geometry, where singular covariance matrices arise naturally from the non-minimal parameterizations of the entities. The validity of the proposed adjustment model will be demonstrated by the estimation of a fundamental matrix from synthetic data and compared to heteroscedastic regression [?], which is considered as state-of-the-art estimator for this task. As the latter is unable to simultaneously estimate multiple entities, we will also demonstrate the usefulness and the feasibility of our approach by the constrained estimation of three vanishing points from observed uncertain image line segments.},
    doi = {10.1007/978-3-642-03798-6_30},
    url = {https://www.ipb.uni-bonn.de/pdfs/Meidow2009Optimal.pdf},
    }

  • M. D. Mura, J. A. Benediktsson, B. Waske, and L. Bruzzone, “Morphological attribute filters for the analysis of very high resolution remote sensing images,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS 2009), 2009. doi:10.1109/IGARSS.2009.5418096
    [BibTeX]

    This paper proposes the use of morphological attribute profiles as an effective alternative to the conventional morphological operators based on the geodesic reconstruction for modeling the spatial information in very high resolution images. Attribute profiles, used in multilevel approaches, result particularly effective in terms of computational complexity and capabilities in characterizing the objects in the image. In addition they are more flexible than operators by reconstruction, thanks to the definition of possible different attributes. Experimental results obtained on a Quickbird panchromatic very high resolution image proved the effectiveness of the presented attribute filters and pointed out their main properties.

    @InProceedings{mura2009morphological,
    title = {Morphological attribute filters for the analysis of very high resolution remote sensing images},
    author = {Mura, M.D. and Benediktsson, J.A. and Waske, Bj\"orn and Bruzzone, L.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS 2009)},
    year = {2009},
    abstract = {This paper proposes the use of morphological attribute profiles as an effective alternative to the conventional morphological operators based on the geodesic reconstruction for modeling the spatial information in very high resolution images. Attribute profiles, used in multilevel approaches, result particularly effective in terms of computational complexity and capabilities in characterizing the objects in the image. In addition they are more flexible than operators by reconstruction, thanks to the definition of possible different attributes. Experimental results obtained on a Quickbird panchromatic very high resolution image proved the effectiveness of the presented attribute filters and pointed out their main properties.},
    doi = {10.1109/IGARSS.2009.5418096},
    keywords = {Quickbird panchromatic imagery;computational complexity;geodesic reconstruction;morphological attribute filters;morphological operators;spatial information;very high resolution remote sensing images;geophysical image processing;image reconstruction;mathematical morphology;remote sensing;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • M. Pilger, “Automatische Bestimmung skalierungsinvarianter Fenster für markante Bildpunkte,” Diploma Thesis Master Thesis, 2009.
    [BibTeX]

    Wir haben basierend auf dem Interestoperator von Förstner und Gülch einen skaleninvarianten Operator in Matlab implementiert, der möglichst präzise lokalisierbare Kantenschnittpunkte und ihre Skalen aus Bildern extrahiert. Dazu wurden C++-Bibliotheken zur Rauschschätzung und zur schnellen Berechnung von Faltungen nach der Methode von Deriche für Matlab verfügbar gemacht. Leider hat sich herausgestellt, dass die Faltungen mit dem Deriche-Filter für unsere spezielle Anwendung nicht geeignet ist: Es entstehen Artefakte in unserer Optimierungsfunktion, so dass eine zuverlässige Auswertung nicht gewährleistet ist. Indem wir unsere Funktionen durch Faltungen im Frequenzbereich berechnet haben, konnten wir zunächst auf Testbildern Kantenschnittpunkte mit entsprechenden Skalen extrahieren. Perfekte Skaleninvarianz bei Maßstabsänderung des Bildes konnten wir in einem Experiment nicht nachweisen: die detektierte Skala eines Kantenschnittpunktes wuchs im Experiment nicht schnell genug mit dem größer werdenden Bildmaßstab mit. Dennoch erzielten wir auf realen Bildern gute Ergebnisse und detektierten auf zwei Bildern, die sich durch bekannte geometrische oder radiometrische Transformationen unterscheiden, prozentual ähnlich viele korrespondierende Punkte und Skalen wie existierende skaleninvariante Interestoperatoren. Gemessen an der absoluten Zahl der Detektionen liegt unser Operator weit hinter dem SIFT-Operator und dem Harris-Laplace Operator – beide entdecken auf realen Bildern meist mehr als doppelt so viele Punkte wie unser Operator. Allerdings kann unser Operator auf einen weiteren Typus von Interestpunkten erweitert werden, das sind Zentren kreissymmetrischer Bildmerkmale, oder allgemeiner auch auf spiralartige Merkmale. Damit kann in Zukunft möglicherweise das Manko der geringen Anzahl an Detektionen überwunden werden. Ohne einen Schwellwert detektiert unser Operator auch zufällig verteilte Punkte und Skalen in homogenen Bildbereichen. 
Wir haben gezeigt, dass es sinnvoll ist, ein Homogenitätsmaß zu benutzen, um Detektionen auf homogenen Bildbereichen zu unterdrücken, und dennoch auch nicht so gut lokalisierte Punkte, die aber zu einer Bildorientierung beitragen können, zu erhalten. Unser Operator lässt im derzeitigen Entwicklungsstadium noch Raum für Erweiterungen: neben der schon erwähnten Einbeziehung weiterer Punktmerkmale kann bei Farbbildern die Information aller drei Kanäle in die Detektion mit einbezogen werden, ähnlich wie bei Fuchs (1997), ohne das Bild unter Informationsverlust auf einen Helligkeitskanal zu reduzieren. Außerdem könnte untersucht werden, ob sich ein Oversampling des Bildes vor der Berechnung der quadratischen Gradienten, wie es Köthe (2003) vorschlägt, vorteilhaft auf die Punktdetektionen auswirkt. Wichtig für Anwendungen in der Praxis wäre auch eine deutliche Geschwindigkeitssteigerung. Abhängig von Bildgröße, Anzahl detektierter Punktkandidaten und Diskretisierungsdichte des Skalenraums kann die Detektion für ein Bild der Größe (800 x 800pel) bei eingeschalteter Subpixelschätzung auf einem 2,4 GHz Computer 15 Minuten dauern. Die meiste Zeit beanspruchen dabei die Faltungen und die kubische Interpolation bei der Subpixelschätzung. Die Zeit für die Faltungen könnte durch einen Übergang auf eine Pyramidendarstellung des Bildes im Skalenraum reduziert werden.

    @MastersThesis{pilger2009automatische,
    title = {Automatische Bestimmung skalierungsinvarianter Fenster f\"ur markante Bildpunkte},
    author = {Pilger, Marko},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Timo Dickscheid},
    type = {Diploma Thesis},
    abstract = {Wir haben basierend auf dem Interestoperator von F\"orstner und G\"ulch einen skaleninvarianten Operator in Matlab implementiert, der m\"oglichst pr\"azise lokalisierbare Kantenschnittpunkte und ihre Skalen aus Bildern extrahiert. Dazu wurden C++-Bibliotheken zur Rauschsch\"atzung und zur schnellen Berechnung von Faltungen nach der Methode von Deriche f\"ur Matlab verf\"ugbar gemacht. Leider hat sich herausgestellt, dass die Faltungen mit dem Deriche-Filter f\"ur unsere spezielle Anwendung nicht geeignet ist: Es entstehen Artefakte in unserer Optimierungsfunktion, so dass eine zuverl\"assige Auswertung nicht gew\"ahrleistet ist. Indem wir unsere Funktionen durch Faltungen im Frequenzbereich berechnet haben, konnten wir zun\"achst auf Testbildern Kantenschnittpunkte mit entsprechenden Skalen extrahieren. Perfekte Skaleninvarianz bei Ma{\ss}stabs\"anderung des Bildes konnten wir in einem Experiment nicht nachweisen: die detektierte Skala eines Kantenschnittpunktes wuchs im Experiment nicht schnell genug mit dem gr\"o{\ss}er werdenden Bildma{\ss}stab mit. Dennoch erzielten wir auf realen Bildern gute Ergebnisse und detektierten auf zwei Bildern, die sich durch bekannte geometrische oder radiometrische Transformationen unterscheiden, prozentual \"ahnlich viele korrespondierende Punkte und Skalen wie existierende skaleninvariante Interestoperatoren. Gemessen an der absoluten Zahl der Detektionen liegt unser Operator weit hinter dem SIFT-Operator und dem Harris-Laplace Operator - beide entdecken auf realen Bildern meist mehr als doppelt so viele Punkte wie unser Operator. Allerdings kann unser Operator auf einen weiteren Typus von Interestpunkten erweitert werden, das sind Zentren kreissymmetrischer Bildmerkmale, oder allgemeiner auch auf spiralartige Merkmale. Damit kann in Zukunft m\"oglicherweise das Manko der geringen Anzahl an Detektionen \"uberwunden werden. 
Ohne einen Schwellwert detektiert unser Operator auch zuf\"allig verteilte Punkte und Skalen in homogenen Bildbereichen. Wir haben gezeigt, dass es sinnvoll ist, ein Homogenit\"atsma{\ss} zu benutzen, um Detektionen auf homogenen Bildbereichen zu unterdr\"ucken, und dennoch auch nicht so gut lokalisierte Punkte, die aber zu einer Bildorientierung beitragen k\"onnen, zu erhalten. Unser Operator l\"asst im derzeitigen Entwicklungsstadium noch Raum f\"ur Erweiterungen: neben der schon erw\"ahnten Einbeziehung weiterer Punktmerkmale kann bei Farbbildern die Information aller drei Kan\"ale in die Detektion mit einbezogen werden, \"ahnlich wie bei Fuchs (1997), ohne das Bild unter Informationsverlust auf einen Helligkeitskanal zu reduzieren. Au{\ss}erdem k\"onnte untersucht werden, ob sich ein Oversampling des Bildes vor der Berechnung der quadratischen Gradienten, wie es K\"othe (2003) vorschl\"agt, vorteilhaft auf die Punktdetektionen auswirkt. Wichtig f\"ur Anwendungen in der Praxis w\"are auch eine deutliche Geschwindigkeitssteigerung. Abh\"angig von Bildgr\"o{\ss}e, Anzahl
    detektierter Punktkandidaten und Diskretisierungsdichte des Skalenraums kann die Detektion f\"ur ein Bild der Gr\"o{\ss}e (800 x 800pel) bei eingeschalteter Subpixelsch\"atzung auf einem 2,4 GHz Computer 15 Minuten dauern. Die meiste Zeit beanspruchen dabei die Faltungen und die kubische Interpolation bei der Subpixelsch\"atzung. Die Zeit f\"ur die Faltungen k\"onnte durch einen \"Ubergang auf eine Pyramidendarstellung des Bildes im Skalenraum reduziert werden.},
    }

  • R. Roscher and W. Förstner, “Multiclass Bounded Logistic Regression – Efficient Regularization with Interior Point Method,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-02, 2009.
    [BibTeX] [PDF]

    Logistic regression has been widely used in classification tasks for many years. Its optimization in case of linear separable data has received extensive study due to the problem of a monotone likelihood. This paper presents a new approach, called bounded logistic regression (BLR), by solving the logistic regression as a convex optimization problem with constraints. The paper tests the accuracy of BLR by evaluating nine well-known datasets and compares it to the closely related support vector machine approach (SVM).

    @TechReport{roscher2009multiclass,
    title = {Multiclass Bounded Logistic Regression -- Efficient Regularization with Interior Point Method},
    author = {Roscher, Ribana and F\"orstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2009},
    number = {TR-IGG-P-2009-02},
    abstract = {Logistic regression has been widely used in classification tasks for many years. Its optimization in case of linear separable data has received extensive study due to the problem of a monotone likelihood. This paper presents a new approach, called bounded logistic regression (BLR), by solving the logistic regression as a convex optimization problem with constraints. The paper tests the accuracy of BLR by evaluating nine well-known datasets and compares it to the closely related support vector machine approach (SVM).},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2009Multiclass.pdf},
    }

  • J. Schmittwilken, M. Y. Yang, W. Förstner, and L. Plümer, “Integration of conditional random fields and attribute grammars for range data interpretation of man-made objects,” Annals of GIS, vol. 15, iss. 2, p. 117–126, 2009. doi:10.1080/19475680903464696
    [BibTeX] [PDF]

    A new concept for the integration of low- and high-level reasoning for the interpretation of images of man-made objects is described. The focus is on the 3D reconstruction of facades, especially the transition area between buildings and the surrounding ground. The aim is the identification of semantically meaningful objects such as stairs, entrances, and windows. A low-level module based on the random sample consensus (RANSAC) algorithm generates planar polygonal patches. Conditional random fields (CRFs) are used for their classification, based on local neighborhood and priors from the grammar. An attribute grammar is used to represent semantic knowledge including object partonomy and observable geometric constraints. The AND-OR tree-based parser uses the precision of the classified patches to control the reconstruction process and to optimize the sampling mechanism of RANSAC. Although CRFs are close to data, attribute grammars make the high-level structure of objects explicit and translate semantic knowledge in observable geometric constraints. Our approach combines top-down and bottom-up reasoning by integrating CRF and attribute grammars and thus exploits the complementary strengths of these methods.

    @Article{schmittwilken2009integration,
    title = {Integration of conditional random fields and attribute grammars for range data interpretation of man-made objects},
    author = {Schmittwilken, J\"org and Yang, Michael Ying and F\"orstner, Wolfgang and Pl\"umer, Lutz},
    journal = {Annals of GIS},
    year = {2009},
    number = {2},
    pages = {117--126},
    volume = {15},
    abstract = {A new concept for the integration of low- and high-level reasoning for the interpretation of images of man-made objects is described. The focus is on the 3D reconstruction of facades, especially the transition area between buildings and the surrounding ground. The aim is the identification of semantically meaningful objects such as stairs, entrances, and windows. A low-level module based on the random sample consensus (RANSAC) algorithm generates planar polygonal patches. Conditional random fields (CRFs) are used for their classification, based on local neighborhood and priors from the grammar. An attribute grammar is used to represent semantic knowledge including object partonomy and observable geometric constraints. The AND-OR tree-based parser uses the precision of the classified patches to control the reconstruction process and to optimize the sampling mechanism of RANSAC. Although CRFs are close to data, attribute grammars make the high-level structure of objects explicit and translate semantic knowledge in observable geometric constraints. Our approach combines top-down and bottom-up reasoning by integrating CRF and attribute grammars and thus exploits the complementary strengths of these methods.},
    doi = {10.1080/19475680903464696},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schmittwilken2009Integration.pdf},
    }

  • A. Schneider, J. Sturm, C. Stachniss, M. Reisert, H. Burkhardt, and W. Burgard, “Object Identification with Tactile Sensors Using Bag-of-Features,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{schneider2009,
    title = {Object Identification with Tactile Sensors Using Bag-of-Features},
    author = {A. Schneider and J. Sturm and C. Stachniss and M. Reisert and H. Burkhardt and W. Burgard},
    booktitle = iros,
    year = {2009},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm09iros.pdf},
    }

  • R. Schultz, “Orientierung einer Kamera in einer Legolandszene,” Bachelor Thesis Master Thesis, 2009.
    [BibTeX]

    Diese Arbeit untersucht ein Verfahren zur Bestimmung der äußeren Orientierung einer Kamera. Für viele Anwendungen in der Photogrammetrie ist es interessant, die äußere Orientierung der Kamera mit geringem Aufwand schätzen zu können. Die äußere Orientierung beschreibt die räumliche Lage der Kamera im Objektkoordinatensystem und lässt sich über die Fluchtpunkte bestimmen. Die Fluchtpunkte lassen sich in einer Legolandszene durch parallele Objektkanten schätzen. In einer Legolandszene bestehen alle Objekte aus Polyedern, die ausschließlich rechte Winkel haben. Hierbei sind die Polyeder parallel zueinander angeordnet. Legolandszenen sind eine Vereinfachung realer Bilder. Sie sollen dem Erlernen des Erkennens von Strukturen, in diesem Falle von Objektkanten dienen. Ziel ist es, eine Methode zu entwickeln, mit deren Hilfe im Bild Objektkanten, die zum gleichen Fluchtpunkt führen, gefunden werden können. Auf Grundlage dieser Kanten kann die äußere Orientierung der Kamera bestimmt werden. Es existiert ein Verfahren zur Bestimmung der äußeren Orientierung der Kamera, unter der Voraussetzung, dass die innere Orientierung bekannt ist. Dieses Verfahren wurde an der Universität Bonn von Prof. Förstner entwickelt. Aufgabe der Bachelorarbeit ist es, dieses Verfahren bezüglich seiner Kantenwahl zu verbessern. Es wurden in den Bildern Kanten segmentiert, unter welchen Kantenpaare manuell dahingehend untersucht wurden, ob sie zum gleichen Fluchtpunkt führen. Diese Datenmenge wurde in eine Test- und Trainingsmenge unterteilt. Die Daten der Trainingsmenge wurden verwendet, um anhand von geometrischen Eigenschaften zu untersuchen, ob ein Kantenpaar zum gleichen Fluchtpunkt führt. Es wurden der Abstand und der Winkel zwischen zwei Kanten sowie deren Überlappung untersucht. Weiterhin wurde zu den extrahierten Kanten eine Dreiecksvermaschung durch eine bedingte Delaunay- Triangulierung konstruiert, mit deren Hilfe ein Kantenzuordnungsverfahren entwickelt wurde. 
Diese geometrischen Eigenschaften wurden vorerst einzeln und später in Kombination mittels eines Entscheidungsbaumes untersucht. Die für die Eigenschaften ermittelten Kriterien wurden mit den Daten der Testmenge überprüft. Bei den untersuchten Daten erwies sich ein Winkel zwischen 13 Grad und 19 Grad als effektiv. Hiermit wurden 58 % der theoretisch maximalen Utility durch fehlerfreie Klassifikation erreicht, im Kontrast zu 10 % des ursprünglichen Verfahrens.

    @MastersThesis{schultz2009orientierung,
    title = {Orientierung einer Kamera in einer Legolandszene},
    author = {Schultz, Rebekka},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Timo Dickscheid},
    type = {Bachelor Thesis},
    abstract = {Diese Arbeit untersucht ein Verfahren zur Bestimmung der \"au{\ss}eren Orientierung einer Kamera. F\"ur viele Anwendungen in der Photogrammetrie ist es interessant, die \"au{\ss}ere Orientierung der Kamera mit geringem Aufwand sch\"atzen zu k\"onnen. Die \"au{\ss}ere Orientierung beschreibt die r\"aumliche Lage der Kamera im Objektkoordinatensystem und l\"asst sich \"uber die Fluchtpunkte bestimmen. Die Fluchtpunkte lassen sich in einer Legolandszene durch parallele Objektkanten sch\"atzen. In einer Legolandszene bestehen alle Objekte aus Polyedern, die ausschlie{\ss}lich rechte Winkel haben. Hierbei sind die Polyeder parallel zueinander angeordnet. Legolandszenen sind eine Vereinfachung realer Bilder. Sie sollen dem Erlernen des Erkennens von Strukturen, in diesem Falle von Objektkanten dienen. Ziel ist es, eine Methode zu entwickeln, mit deren Hilfe im Bild Objektkanten, die zum gleichen Fluchtpunkt f\"uhren, gefunden werden k\"onnen. Auf Grundlage dieser Kanten kann die \"au{\ss}ere Orientierung der Kamera bestimmt werden. Es existiert ein Verfahren zur Bestimmung der \"au{\ss}eren Orientierung der Kamera, unter der Voraussetzung, dass die innere Orientierung bekannt ist. Dieses Verfahren wurde an der Universit\"at Bonn von Prof. F\"orstner entwickelt. Aufgabe der Bachelorarbeit ist es, dieses Verfahren bez\"uglich seiner Kantenwahl zu verbessern. Es wurden in den Bildern Kanten segmentiert, unter welchen Kantenpaare manuell dahingehend untersucht wurden, ob sie zum gleichen Fluchtpunkt f\"uhren. Diese Datenmenge wurde in eine Test- und Trainingsmenge unterteilt. Die Daten der Trainingsmenge wurden verwendet, um anhand von geometrischen Eigenschaften zu untersuchen, ob ein Kantenpaar zum gleichen Fluchtpunkt f\"uhrt. Es wurden der Abstand und der Winkel zwischen zwei Kanten sowie deren \"Uberlappung untersucht. 
Weiterhin wurde zu den extrahierten Kanten eine Dreiecksvermaschung durch eine bedingte Delaunay-Triangulierung konstruiert, mit deren Hilfe ein Kantenzuordnungsverfahren entwickelt wurde. Diese geometrischen Eigenschaften wurden vorerst einzeln und sp\"ater in Kombination mittels eines Entscheidungsbaumes untersucht. Die f\"ur die Eigenschaften ermittelten Kriterien wurden mit den Daten der Testmenge \"uberpr\"uft. Bei den untersuchten Daten erwies sich ein Winkel zwischen 13 Grad und 19 Grad als effektiv. Hiermit wurden 58 \% der theoretisch maximalen Utility durch fehlerfreie Klassifikation erreicht, im Kontrast zu 10 \% des urspr\"unglichen Verfahrens.},
    }

  • C. Stachniss, “Spatial Modeling and Robot Navigation,” Habilitation PhD Thesis, 2009.
    [BibTeX] [PDF]
    [none]
    @PhDThesis{stachniss2009,
    author = {C. Stachniss},
    title = {Spatial Modeling and Robot Navigation},
    school = {University of Freiburg, Department of Computer Science},
    type = {Habilitation},
    year = {2009},
    abstract = {[none]},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss-habil.pdf},
    timestamp = {2014.04.24},
    }

  • C. Stachniss, Robotic Mapping and Exploration, Springer, 2009, vol. 55.
    [BibTeX]
    [none]
    @Book{stachniss2009a,
    author = {C. Stachniss},
    title = {Robotic Mapping and Exploration},
    publisher = {Springer},
    series = springerstaradvanced,
    volume = {55},
    year = {2009},
    isbn = {978-3-642-01096-5},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • C. Stachniss, O. Martinez Mozos, and W. Burgard, “Efficient Exploration of Unknown Indoor Environments using a Team of Mobile Robots,” Annals of Mathematics and Artificial Intelligence, vol. 52, p. 205ff, 2009.
    [BibTeX]
    [none]
    @Article{stachniss2009b,
    title = {Efficient Exploration of Unknown Indoor Environments using a Team of Mobile Robots},
    author = {Stachniss, C. and Martinez Mozos, O. and Burgard, W.},
    journal = {Annals of Mathematics and Artificial Intelligence},
    year = {2009},
    pages = {205ff},
    volume = {52},
    abstract = {[none]},
    number = {2},
    timestamp = {2014.04.24},
    }

  • C. Stachniss, C. Plagemann, and A. J. Lilienthal, “Gas Distribution Modeling using Sparse Gaussian Process Mixtures,” Autonomous Robots, vol. 26, p. 187ff, 2009.
    [BibTeX]
    [none]
    @Article{stachniss2009c,
    title = {Gas Distribution Modeling using Sparse Gaussian Process Mixtures},
    author = {Stachniss, C. and Plagemann, C. and Lilienthal, A.J.},
    journal = auro,
    year = {2009},
    pages = {187ff},
    volume = {26},
    abstract = {[none]},
    number = {2},
    timestamp = {2014.04.24},
    }

  • R. Steffen, “Visual SLAM from image sequences acquired by unmanned aerial vehicles,” PhD Thesis, 2009.
    [BibTeX]

    Die Verwendung der Triangulation zur Lösung des Problems der gleichzeitigen Lokalisierung und Kartierung findet seit Jahren ihren Eingang in die Entwicklung autonomer Systeme. Aufgrund von Echtzeitanforderungen dieser Systeme erreichen rekursive Schätzverfahren, insbesondere Kalmanfilter basierte Ansätze, große Beliebtheit. Bedauerlicherweise treten dabei durch die Nichtlinearität der Triangulation einige Effekte auf, welche die Konsistenz und Genauigkeit der Lösung hinsichtlich der geschätzten Parameter maßgeblich beeinflussen. In der Literatur existieren dazu einige interessante Lösungsansätze, um diese genauigkeitsrelevanten Effekte zu minimieren. Die Motivation dieser Arbeit ist die These, dass die Kalmanfilter basierte Lösung der Triangulation zur Lokalisierung und Kartierung aus Bildfolgen von unbemannten Drohnen realisierbar ist. Im Gegensatz zur klassischen Aero-Triangulation treten dadurch zusätzliche Aspekte in den Vordergrund, die in dieser Arbeit beleuchtet werden. Der erste Beitrag dieser Arbeit besteht in der Herleitung eines generellen Verfahrens zum rekursiven Verbessern im Kalmanfilter mit impliziten Beobachtungsgleichungen. Wir zeigen, dass die klassischen Verfahren im Kalmanfilter eine Spezialisierung unseres Ansatzes darstellen. Im zweiten Beitrag erweitern wir die klassische Modellierung für ein Einkameramodell im Kalmanfilter und formulieren linear berechenbare Bewegungsmodelle. Neben verschiedenen Verfahren zur Initialisierung von Neupunkten im Kalmanfilter aus der Literatur stellen wir in einem dritten Hauptbeitrag ein neues Verfahren vor. Am Beispiel von Bildfolgen eines unbemannten Flugobjektes zeigen wir in dieser Arbeit als vierten Beitrag, welche Genauigkeit zur Lokalisierung und Kartierung durch Triangulation möglich ist. 
Schließlich wird anhand von empirischen Untersuchungen unter Verwendung simulierter und realer Daten einer Bildfolge eines photogrammetrischen Streifens gezeigt und verglichen, welchen Einfluß die Initialisierungsmethoden für Neupunkte im Kalmanfilter haben und welche Genauigkeiten für diese Szenarien erreichbar sind.

    @PhDThesis{steffen2009visual,
    title = {Visual SLAM from image sequences acquired by unmanned aerial vehicles},
    author = {Steffen, Richard},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2009},
    abstract = {Die Verwendung der Triangulation zur L\"osung des Problems der gleichzeitigen Lokalisierung und Kartierung findet seit Jahren ihren Eingang in die Entwicklung autonomer Systeme. Aufgrund von Echtzeitanforderungen dieser Systeme erreichen rekursive Sch\"atzverfahren, insbesondere Kalmanfilter basierte Ans\"atze, gro{\ss}e Beliebtheit. Bedauerlicherweise treten dabei durch die Nichtlinearit\"at der Triangulation einige Effekte auf, welche die Konsistenz und Genauigkeit der L\"osung hinsichtlich der gesch\"atzten Parameter ma{\ss}geblich beeinflussen. In der Literatur existieren dazu einige interessante L\"osungsans\"atze, um diese genauigkeitsrelevanten Effekte zu minimieren. Die Motivation dieser Arbeit ist die These, dass die Kalmanfilter basierte L\"osung der Triangulation zur Lokalisierung und Kartierung aus Bildfolgen von unbemannten Drohnen realisierbar ist. Im Gegensatz zur klassischen Aero-Triangulation treten dadurch zus\"atzliche Aspekte in den Vordergrund, die in dieser Arbeit beleuchtet werden. Der erste Beitrag dieser Arbeit besteht in der Herleitung eines generellen Verfahrens zum rekursiven Verbessern im Kalmanfilter mit impliziten Beobachtungsgleichungen. Wir zeigen, dass die klassischen Verfahren im Kalmanfilter eine Spezialisierung unseres Ansatzes darstellen. Im zweiten Beitrag erweitern wir die klassische Modellierung f\"ur ein Einkameramodell im Kalmanfilter und formulieren linear berechenbare Bewegungsmodelle. Neben verschiedenen Verfahren zur Initialisierung von Neupunkten im Kalmanfilter aus der Literatur stellen wir in einem dritten Hauptbeitrag ein neues Verfahren vor. Am Beispiel von Bildfolgen eines unbemannten Flugobjektes zeigen wir in dieser Arbeit als vierten Beitrag, welche Genauigkeit zur Lokalisierung und Kartierung durch Triangulation m\"oglich ist. 
Schlie{\ss}lich wird anhand von empirischen Untersuchungen unter Verwendung simulierter und realer Daten einer Bildfolge eines photogrammetrischen Streifens gezeigt und verglichen, welchen Einflu{\ss} die Initialisierungsmethoden f\"ur Neupunkte im Kalmanfilter haben und welche Genauigkeiten f\"ur diese Szenarien erreichbar sind.},
    }

  • H. Strasdat, C. Stachniss, and W. Burgard, “Which Landmark is Useful? Learning Selection Policies for Navigation in Unknown Environments,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Kobe, Japan, 2009.
    [BibTeX]
    [none]
    @InProceedings{strasdat2009,
    title = {Which Landmark is Useful? Learning Selection Policies for Navigation in Unknown Environments},
    author = {Strasdat, H. and Stachniss, C. and Burgard, W.},
    booktitle = icra,
    year = {2009},
    address = {Kobe, Japan},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. Sturm, V. Pradeep, C. Stachniss, C. Plagemann, K. Konolige, and W. Burgard, “Learning Kinematic Models for Articulated Objects,” , Pasadena, CA, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{sturm2009a,
    title = {Learning Kinematic Models for Articulated Objects},
    author = {J. Sturm and V. Pradeep and Stachniss, C. and C. Plagemann and K. Konolige and Burgard, W.},
    booktitle = ijcai,
    year = {2009},
    address = {Pasadena, CA, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    }

  • J. Sturm, C. Stachniss, V. Pradeep, C. Plagemann, K. Konolige, and W. Burgard, “Learning Kinematic Models for Articulated Objects,” in Online Proc. of the Learning Workshop (Snowbird), Clearwater, FL, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{sturm2009,
    title = {Learning Kinematic Models for Articulated Objects},
    author = {Sturm, J. and Stachniss, C. and Pradeep, V. and Plagemann, C. and Konolige, K. and Burgard, W.},
    booktitle = {Online Proc. of the Learning Workshop (Snowbird)},
    year = {2009},
    address = {Clearwater, FL, USA},
    timestamp = {2014.04.24},
    }

  • J. Sturm, C. Stachniss, V. Pradeep, C. Plagemann, K. Konolige, and W. Burgard, “Towards Understanding Articulated Objects,” in Workshop Integrating Mobility and Manipulation at Robotics: Science and Systems (RSS), Seattle, WA, USA, 2009.
    [BibTeX]
    [none]
    @InProceedings{sturm2009b,
    title = {Towards Understanding Articulated Objects},
    author = {Sturm, J. and Stachniss, C. and Pradeep, V. and Plagemann, C. and Konolige, K. and Burgard, W.},
    booktitle = {Workshop Integrating Mobility and Manipulation at Robotics: Science and Systems (RSS)},
    year = {2009},
    address = {Seattle, WA, USA},
    timestamp = {2014.04.24},
    }

  • J. R. Sveinsson, B. Waske, and J. A. Benediktsson, “Speckle reduction of TerraSAR-X imagery using TV segmentation,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2009. doi:10.1109/IGARSS.2009.5417412
    [BibTeX]

    The nonsubsampled contourlet transform (NSCT) is a new image representation approach that has sparser representation at both spatial and directional resolution and thus captures smooth contours in images. On the other hand, wavelet transform has sparser representation of homogeneous areas. In this paper, we are going to use the three combinations of undecimated wavelet and nonsubsampled contourlet transforms that was used in for denoising of TerraSAR-X images. Two of the methods use the undecimated wavelet transform to de-noise homogeneous areas and the nonsubsampled contourlet transform to denoise areas with edges. The segmentation between homogeneous areas and areas with edges is done by using total variation segmentation. The third method is a linear averaging of the two denoising methods. A thresholding in the wavelet and contourlet domain is done by non-linear functions which are adapted for each selected subband. The non-linear functions are based on sigmoid functions. Simulation results suggested that these denoising schemes achieve good and clean images.

    @InProceedings{sveinsson2009speckle,
    title = {Speckle reduction of {TerraSAR-X} imagery using {TV} segmentation},
    author = {Sveinsson, J. R. and Waske, Bj\"orn and Benediktsson, J. A.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2009},
    abstract = {The nonsubsampled contourlet transform (NSCT) is a new image representation approach that has sparser representation at both spatial and directional resolution and thus captures smooth contours in images. On the other hand, wavelet transform has sparser representation of homogeneous areas. In this paper, we are going to use the three combinations of undecimated wavelet and nonsubsampled contourlet transforms that was used in for denoising of TerraSAR-X images. Two of the methods use the undecimated wavelet transform to de-noise homogeneous areas and the nonsubsampled contourlet transform to denoise areas with edges. The segmentation between homogeneous areas and areas with edges is done by using total variation segmentation. The third method is a linear averaging of the two denoising methods. A thresholding in the wavelet and contourlet domain is done by non-linear functions which are adapted for each selected subband. The non-linear functions are based on sigmoid functions. Simulation results suggested that these denoising schemes achieve good and clean images.},
    doi = {10.1109/IGARSS.2009.5417412},
    keywords = {TV segmentation;TerraSAR-X imagery;directional resolution;image contours;image denoising;image representation;image segmentation;linear averaging;nonlinear functions;nonsubsampled contourlet transforms;sigmoid functions;spatial resolution;speckle reduction;total variation segmentation;undecimated wavelet transforms;feature extraction;geophysical image processing;geophysical techniques;image denoising;image representation;image resolution;image segmentation;radar imaging;remote sensing by radar;synthetic aperture radar;wavelet transforms;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • T. Udelhoven, S. van der Linden, B. Waske, M. Stellmes, and L. Hoffmann, “Hypertemporal Classification of Large Areas Using Decision Fusion,” IEEE Geoscience and Remote Sensing Letters, vol. 6, iss. 3, p. 592–596, 2009. doi:10.1109/LGRS.2009.2021960
    [BibTeX]

    A novel multiannual land-cover-classification scheme for classifying hypertemporal image data is suggested, which is based on a supervised decision fusion (DF) approach. This DF approach comprises two steps: First, separate support vector machines (SVMs) are trained for normalized difference vegetation index (NDVI) time-series and mean annual temperature values of three consecutive years. In the second step, the information of the preliminary continuous SVM outputs, which represent posterior probabilities of the class assignments, is fused using a second-level SVM classifier. We tested the approach using the 10-day maximum-value NDVI composites from the “Mediterranean Extended Daily one-km Advanced Very High Resolution Radiometer Data Set” (MEDOKADS). The approach increases the classification accuracy and robustness compared with another DF method (simple majority voting) and with a single SVM expert that is trained for the same multiannual periods. The results clearly demonstrate that DF is a reliable technique for large-area mapping using hypertemporal data sets.

    @Article{udelhoven2009hypertemporal,
    title = {Hypertemporal Classification of Large Areas Using Decision Fusion},
    author = {Udelhoven, Thomas and van der Linden, Sebastian and Waske, Bj\"orn and Stellmes, Marion and Hoffmann, Lucien},
    journal = {IEEE Geoscience and Remote Sensing Letters},
    year = {2009},
    month = jul,
    number = {3},
    pages = {592--596},
    volume = {6},
    abstract = {A novel multiannual land-cover-classification scheme for classifying hypertemporal image data is suggested, which is based on a supervised decision fusion (DF) approach. This DF approach comprises two steps: First, separate support vector machines (SVMs) are trained for normalized difference vegetation index (NDVI) time-series and mean annual temperature values of three consecutive years. In the second step, the information of the preliminary continuous SVM outputs, which represent posterior probabilities of the class assignments, is fused using a second-level SVM classifier. We tested the approach using the 10-day maximum-value NDVI composites from the "Mediterranean Extended Daily one-km Advanced Very High Resolution Radiometer Data Set" (MEDOKADS). The approach increases the classification accuracy and robustness compared with another DF method (simple majority voting) and with a single SVM expert that is trained for the same multiannual periods. The results clearly demonstrate that DF is a reliable technique for large-area mapping using hypertemporal data sets.},
    doi = {10.1109/LGRS.2009.2021960},
    issn = {1545-598X},
    owner = {waske},
    timestamp = {2012.09.04},
    ut = {WOS:000267764800048},
    }

  • S. Valero, J. Chanussot, J. A. Benediktsson, H. Talbot, and B. Waske, “Directional mathematical morphology for the detection of the road network in Very High Resolution remote sensing images,” in 16th IEEE International Conf. on Image Processing (ICIP), 2009. doi:10.1109/ICIP.2009.5414344
    [BibTeX]

    This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape and hence outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Closing to perform Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based.

    @InProceedings{valero2009directional,
    title = {Directional mathematical morphology for the detection of the road network in {Very High Resolution} remote sensing images},
    author = {Valero, S. and Chanussot, J. and Benediktsson, J. A. and Talbot, H. and Waske, Bj\"orn},
    booktitle = {16th IEEE International Conf. on Image Processing (ICIP)},
    year = {2009},
    abstract = {This paper presents a new method for extracting roads in Very High Resolution remotely sensed images based on advanced directional morphological operators. The proposed approach introduces the use of Path Openings and Closings in order to extract structural pixel information. These morphological operators remain flexible enough to fit rectilinear and slightly curved structures since they do not depend on the choice of a structural element shape and hence outperform standard approaches using rotating rectangular structuring elements. The method consists in building a granulometry chain using Path Openings and Closing to perform Morphological Profiles. For each pixel, the Morphological Profile constitutes the feature vector on which our road extraction is based.},
    doi = {10.1109/ICIP.2009.5414344},
    issn = {1522-4880},
    keywords = {directional mathematical morphology;morphological profiles;path closings;path openings;rectilinear structures;road network detection;slightly curved structures;structural pixel information;very high resolution remote sensing images;geophysical image processing;geophysical techniques;mathematical morphology;remote sensing;roads;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • X. Wang, B. Waske, and J. A. Benediktsson, “Ensemble methods for spectral-spatial classification of urban hyperspectral data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2009. doi:10.1109/IGARSS.2009.5417534
    [BibTeX]

    Classification of hyperspectral data with high spatial resolution from urban areas is investigated. The approach is an extension of existing approaches, using both spectral and spatial information for classification. The spatial information is derived by mathematical morphology and principal components of the hyperspectral data set, generating a set of different morphological profiles. The whole data set is classified by the Random Forest algorithm. However, the computational complexity as well as the increased dimensionality and redundancy of data sets based on morphological profiles are potential drawbacks. Thus, in the presented study, feature selection is applied, using nonparametric weighted feature extraction and the variable importance of the random forests. The proposed approach is applied to ROSIS data from an urban area. The experimental results demonstrate that a feature reduction is useful in terms of accuracy. Moreover, the proposed approach also shows excellent results with a limited training set.

    @InProceedings{wang2009ensemble,
    title = {Ensemble methods for spectral-spatial classification of urban hyperspectral data},
    author = {Wang, Xin-Lu and Waske, Bj\"orn and Benediktsson, J. A.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2009},
    abstract = {Classification of hyperspectral data with high spatial resolution from urban areas is investigated. The approach is an extension of existing approaches, using both spectral and spatial information for classification. The spatial information is derived by mathematical morphology and principal components of the hyperspectral data set, generating a set of different morphological profiles. The whole data set is classified by the Random Forest algorithm. However, the computational complexity as well as the increased dimensionality and redundancy of data sets based on morphological profiles are potential drawbacks. Thus, in the presented study, feature selection is applied, using nonparametric weighted feature extraction and the variable importance of the random forests. The proposed approach is applied to ROSIS data from an urban area. The experimental results demonstrate that a feature reduction is useful in terms of accuracy. Moreover, the proposed approach also shows excellent results with a limited training set.},
    doi = {10.1109/IGARSS.2009.5417534},
    keywords = {ROSIS data;computational complexity;data dimensionality;data redundancy;ensemble methods;feature selection;hyperspectral data classification;mathematical morphology;nonparametric weighted feature extraction;principal component analysis;random forest algorithm;spatial information classification;spectral information classification;urban hyperspectral data;decision trees;feature extraction;geophysical image processing;image classification;principal component analysis;remote sensing;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, J. A. Benediktsson, K. Arnason, and J. R. Sveinsson, “Mapping of hyperspectral AVIRIS data using machine-learning algorithms,” Canadian Journal of Remote Sensing, vol. 35, p. 106–116, 2009. doi:10.5589/m09-018
    [BibTeX]

    Hyperspectral imaging provides detailed spectral and spatial information from the land cover that enables a precise differentiation between various surface materials. on the other hand, the performance of traditional and widely used statistical classification methods is often limited in this context, and thus alternative methods are required. In the study presented here, the performance of two machine-learning techniques, namely support vector machines (SVMs) and random forests (RFs), is investigated and the classification results are compared with those from well-known methods (i.e., maximum likelihood classifier and spectral angle mapper). The classifiers are applied to an Airborne Visible/Infrared Imaging Spectrometer (AVIRIS) dataset that was acquired near the Hekla volcano in Iceland. The results clearly show the advantages of the two proposed classifier algorithms in terms of accuracy. They significantly outperform the other methods and achieve overall accuracies of approximately 90%. Although SVM and RF show some diversity in the classification results, the global performance of the two classifiers is very similar. Thus, both methods can be considered attractive for the classification of hyperspectral data.

    @Article{waske2009mapping,
    title = {Mapping of hyperspectral {AVIRIS} data using machine-learning algorithms},
    author = {Waske, Bj\"orn and Benediktsson, Jon Atli and Arnason, Kolbeinn and Sveinsson, Johannes R.},
    journal = {Canadian Journal of Remote Sensing},
    year = {2009},
    pages = {106--116},
    volume = {35},
    abstract = {Hyperspectral imaging provides detailed spectral and spatial information from the land cover that enables a precise differentiation between various surface materials. on the other hand, the performance of traditional and widely used statistical classification methods is often limited in this context, and thus alternative methods are required. In the study presented here, the performance of two machine-learning techniques, namely support vector machines (SVMs) and random forests (RFs), is investigated and the classification results are compared with those from well-known methods (i.e., maximum likelihood classifier and spectral angle mapper). The classifiers are applied to an Airborne Visible/Infrared Imaging Spectrometer (AVIRIS) dataset that was acquired near the Hekla volcano in Iceland. The results clearly show the advantages of the two proposed classifier algorithms in terms of accuracy. They significantly outperform the other methods and achieve overall accuracies of approximately 90%. Although SVM and RF show some diversity in the classification results, the global performance of the two classifiers is very similar. Thus, both methods can be considered attractive for the classification of hyperspectral data.},
    doi = {10.5589/m09-018},
    issn = {1712-7971},
    owner = {waske},
    timestamp = {2012.09.04},
    ut = {WOS:000275720100008},
    }

  • B. Waske, J. A. Benediktsson, and J. R. Sveinsson, “Fusion of multisource data sets from agricultural areas for improved land cover classification,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2009. doi:10.1109/IGARSS.2009.5417536
    [BibTeX]

    An approach for spectral-spatial classification of multisource remote sensing data from agricultural areas is addressed. Mathematical morphology is used to derive the spatial information from the data sets. The different data sources (i.e., SAR and multispectral) are classified by support vector machines (SVM). Afterwards, the SVM outputs are transferred to probability measurements. These probability values are combined by different fusion strategies, to derive the final classification result. Comparing the results based on mathematical morphology the total accuracy increased by 6% compared to the pure-pixel classification results. Moreover the transfer of the SVM outputs into probability values and the subsequent fusion further increases the classification accuracy, resulting in an accuracy of 78.5%.

    @InProceedings{waske2009fusion,
    title = {Fusion of multisource data sets from agricultural areas for improved land cover classification},
    author = {Waske, Bj\"orn and Benediktsson, J. A. and Sveinsson, J. R.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2009},
    abstract = {An approach for spectral-spatial classification of multisource remote sensing data from agricultural areas is addressed. Mathematical morphology is used to derive the spatial information from the data sets. The different data sources (i.e., SAR and multispectral) are classified by support vector machines (SVM). Afterwards, the SVM outputs are transferred to probability measurements. These probability values are combined by different fusion strategies, to derive the final classification result. Comparing the results based on mathematical morphology the total accuracy increased by 6% compared to the pure-pixel classification results. Moreover the transfer of the SVM outputs into probability values and the subsequent fusion further increases the classification accuracy, resulting in an accuracy of 78.5%.},
    doi = {10.1109/IGARSS.2009.5417536},
    keywords = {SAR remote sensing data;SVM;agricultural land cover classification;mathematical morphology;multisource data sets;multisource remote sensing data;multispectral remote sensing data;probability measurements;pure-pixel classification;spectral-spatial classification;support vector machines;geophysical image processing;geophysical techniques;image classification;mathematical morphology;remote sensing by radar;support vector machines;synthetic aperture radar;terrain mapping;vegetation mapping;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske and M. Braun, “Classifier ensembles for land cover mapping using multitemporal SAR imagery,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 64, iss. 5, p. 450–457, 2009. doi:10.1016/j.isprsjprs.2009.01.003
    [BibTeX]

    SAR data are almost independent from weather conditions, and thus are well suited for mapping of seasonally changing variables such as land cover. In regard to recent and upcoming missions, multitemporal and multi-frequency approaches become even more attractive. In the present study, classifier ensembles (i.e., boosted decision tree and random forests) are applied to multi-temporal C-band SAR data, from different study sites and years. A detailed accuracy assessment shows that classifier ensembles, in particularly random forests, outperform standard approaches like a single decision tree and a conventional maximum likelihood classifier by more than 10% independently from the site and year. They reach up to almost 84% of overall accuracy in rural areas with large plots. Visual interpretation confirms the statistical accuracy assessment and reveals that also typical random noise is considerably reduced. In addition the results demonstrate that random forests are less sensitive to the number of training samples and perform well even with only a small number. Random forests are computationally highly efficient and are hence considered very well suited for land cover classifications of future multifrequency and multitemporal stacks of SAR imagery. (C) 2009 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS). Published by Elsevier B.V. All rights reserved.

    @Article{waske2009classifier,
    title = {Classifier ensembles for land cover mapping using multitemporal {SAR} imagery},
    author = {Waske, Bj\"orn and Braun, Matthias},
    journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
    year = {2009},
    month = sep,
    number = {5},
    pages = {450--457},
    volume = {64},
    abstract = {SAR data are almost independent from weather conditions, and thus are well suited for mapping of seasonally changing variables such as land cover. In regard to recent and upcoming missions, multitemporal and multi-frequency approaches become even more attractive. In the present study, classifier ensembles (i.e., boosted decision tree and random forests) are applied to multi-temporal C-band SAR data, from different study sites and years. A detailed accuracy assessment shows that classifier ensembles, in particularly random forests, outperform standard approaches like a single decision tree and a conventional maximum likelihood classifier by more than 10% independently from the site and year. They reach up to almost 84% of overall accuracy in rural areas with large plots. Visual interpretation confirms the statistical accuracy assessment and reveals that also typical random noise is considerably reduced. In addition the results demonstrate that random forests are less sensitive to the number of training samples and perform well even with only a small number. Random forests are computationally highly efficient and are hence considered very well suited for land cover classifications of future multifrequency and multitemporal stacks of SAR imagery. (C) 2009 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS). Published by Elsevier B.V. All rights reserved.},
    doi = {10.1016/j.isprsjprs.2009.01.003},
    issn = {0924-2716},
    owner = {waske},
    timestamp = {2012.09.04},
    ut = {WOS:000273381000003},
    }

  • B. Waske, M. Chi, J. A. Benediktsson, S. van der Linden, and B. Koetz, “Geospatial Technology for Earth Observation,” in Geospatial Technology for Earth Observation, D. Li, J. Shan, and J. Gong, Eds., Springer US, 2009, p. 203–233. doi:10.1007/978-1-4419-0050-0_8
    [BibTeX]

    During the last decades the manner how the Earth is being observed was revolutionized. Earth Observation (EO) systems became a valuable and powerful tool to monitor the Earth and had significant impact on the acquisition and analysis of environmental data (Rosenquist et al. 2003). Currently, EO data play a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties, such as the Kyoto Protocol, the Convention on Biological Diversity, or the European initiative Global Monitoring for Environment and Security, GMES (Peter 2004, Rosenquist et al. 2003, Backhaus and Beule 2005). However, the need for such long-term monitoring of the Earth’s surface requires the standardized and coordinated use of global EO data sets, which has led, e.g., to the international Global Earth Observation System of Systems (GEOSS) initiative as well as to the Global Climate Observation System (GCOS) implementation plan (GCOS 2004, GEO 2005). The evolving EO technologies together with the requirements and standards arising from their exploitation demand increasingly improving algorithms, especially in the field of land cover classification

    @InCollection{waske2009geospatial,
    title = {Algorithms and Applications for Land Cover Classification - A Review},
    author = {Waske, Bj\"orn and Chi, Mingmin and Benediktsson, Jon Atli and van der Linden, Sebastian and Koetz, Benjamin},
    booktitle = {Geospatial Technology for Earth Observation},
    editor = {Li, Deren and Shan, Jie and Gong, Jianya},
    pages = {203--233},
    publisher = {Springer US},
    year = {2009},
    abstract = {During the last decades the manner how the Earth is being observed was revolutionized. Earth Observation (EO) systems became a valuable and powerful tool to monitor the Earth and had significant impact on the acquisition and analysis of environmental data (Rosenquist et al. 2003). Currently, EO data play a major role in supporting decision-making and surveying compliance of several multilateral environmental treaties, such as the Kyoto Protocol, the Convention on Biological Diversity, or the European initiative Global Monitoring for Environment and Security, GMES (Peter 2004, Rosenquist et al. 2003, Backhaus and Beule 2005). However, the need for such long-term monitoring of the Earth's surface requires the standardized and coordinated use of global EO data sets, which has led, e.g., to the international Global Earth Observation System of Systems (GEOSS) initiative as well as to the Global Climate Observation System (GCOS) implementation plan (GCOS 2004, GEO 2005). The evolving EO technologies together with the requirements and standards arising from their exploitation demand increasingly improving algorithms, especially in the field of land cover classification},
    affiliation = {Faculty of Electrical and Computer Engineering, University of Iceland, 107 Reykjavik, Iceland},
    doi = {10.1007/978-1-4419-0050-0_8},
    isbn = {978-1-4419-0050-0},
    keywords = {Earth and Environmental Science},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, M. Fauvel, J. A. Benediktsson, and J. Chanussot, “Machine Learning Techniques in Remote Sensing Data Analysis,” in Kernel Methods for Remote Sensing Data Analysis, G. Camps-Valls and L. Bruzzone, Eds., John Wiley & Sons, Ltd, 2009, p. 1–24. doi:10.1002/9780470748992.ch1
    [BibTeX]

    Several applications have been developed in the field of remote sensing image analysis during the last decades. Besides well-known statistical approaches, many recent methods are based on techniques taken from the field of machine learning. A major aim of machine learning algorithms in remote sensing is supervised classification, which is perhaps the most widely used image classification approach. In this chapter a brief introduction to machine learning and the different paradigms in remote sensing is given. Moreover this chapter briefly discusses the use of recent developments in supervised classification techniques such as neural networks, support vector machines and multiple classifier systems.

    @InCollection{waske2009machine,
    title = {Machine Learning Techniques in Remote Sensing Data Analysis},
    author = {Waske, Bj\"orn and Fauvel, Mathieu and Benediktsson, Jon Atli and Chanussot, Jocelyn},
    booktitle = {Kernel Methods for Remote Sensing Data Analysis},
    editor = {Camps-Valls, Gustavo and Bruzzone, Lorenzo},
    pages = {1--24},
    publisher = {John Wiley \& Sons, Ltd},
    year = {2009},
    abstract = {Several applications have been developed in the field of remote sensing image analysis during the last decades. Besides well-known statistical approaches, many recent methods are based on techniques taken from the field of machine learning. A major aim of machine learning algorithms in remote sensing is supervised classification, which is perhaps the most widely used image classification approach. In this chapter a brief introduction to machine learning and the different paradigms in remote sensing is given. Moreover this chapter briefly discusses the use of recent developments in supervised classification techniques such as neural networks, support vector machines and multiple classifier systems.},
    doi = {10.1002/9780470748992.ch1},
    isbn = {9780470748992},
    keywords = {machine learning techniques in remote sensing data analysis, machine learning algorithms in remote sensing and supervised classification, remote sensing challenges, machine learning (ML) - artificial intelligence area and learning from data, remote sensing paradigms, feature extraction and feature selection and dimensionality reduction, Tasseled Cap Transformation, ISODATA (iterative self-organizing data analysis), neural networks (NN) in pattern recognition and remote sensing context, development in field of (supervised) classification machine learning concepts},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske, S. van der Linden, J. A. Benediktsson, A. Rabe, and P. Hostert, “Impact of different morphological profiles on the classification accuracy of urban hyperspectral data,” in First Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS), 2009. doi:10.1109/WHISPERS.2009.5289078
    [BibTeX]

    We present a detailed study on the classification of urban hyperspectral data with morphological profiles (MP). Although such a spectral-spatial classification approach may significantly increase achieved accuracy, the computational complexity as well as the increased dimensionality and redundancy of such data sets are potential drawbacks. This can be overcome by feature selection. Moreover it is useful to derive detailed information on the contribution of different components from MP to the classification accuracy by evaluating these subsets. We apply a wrapper approach for feature selection based on support vector machines (SVM) with sequential feature forward selection (FFS) search strategy to two hyperspectral data sets that contain the first principal components (PC) and various corresponding MP from an urban area. In doing so, we identify feature subsets of increasing size that perform best in terms of kappa for the given setup. Results clearly demonstrate that maximum classification accuracies are achieved already on small feature subsets with few morphological profiles.

    @InProceedings{waske2009impact,
    title = {Impact of different morphological profiles on the classification accuracy of urban hyperspectral data},
    author = {Waske, Bj\"orn and van der Linden, S. and Benediktsson, J. A. and Rabe, A. and Hostert, P.},
    booktitle = {First Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS)},
    year = {2009},
    abstract = {We present a detailed study on the classification of urban hyperspectral data with morphological profiles (MP). Although such a spectral-spatial classification approach may significantly increase achieved accuracy, the computational complexity as well as the increased dimensionality and redundancy of such data sets are potential drawbacks. This can be overcome by feature selection. Moreover it is useful to derive detailed information on the contribution of different components from MP to the classification accuracy by evaluating these subsets. We apply a wrapper approach for feature selection based on support vector machines (SVM) with sequential feature forward selection (FFS) search strategy to two hyperspectral data sets that contain the first principal components (PC) and various corresponding MP from an urban area. In doing so, we identify feature subsets of increasing size that perform best in terms of kappa for the given setup. Results clearly demonstrate that maximum classification accuracies are achieved already on small feature subsets with few morphological profiles.},
    doi = {10.1109/WHISPERS.2009.5289078},
    keywords = {FFS search;computational complexity;feature forward selection;hyperspectral image;mathematical morphology;morphological profile;principal component;spectral-spatial classification;support vector machine;urban hyperspectral data classification;wrapper approach;feature extraction;image classification;mathematical morphology;principal component analysis;support vector machines;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • S. Wenzel and W. Förstner, “The Role of Sequences for Incremental Learning,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2009-04, 2009.
    [BibTeX] [PDF]

    This report points out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. Different types of experiments evaluate these properties for two different datasets and two different incremental learning methods. We show how to find sequences of classes for training just based on the data to get always best possible error rates. This is based on the estimation of Bayes error bounds.

    @TechReport{wenzel2009role,
    title = {The Role of Sequences for Incremental Learning},
    author = {Wenzel, Susanne and F{\"o}rstner, Wolfgang},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2009},
    month = oct,
    number = {TR-IGG-P-2009-04},
    abstract = {This report points out the role of sequences of samples for training an incremental learning method. We define characteristics of incremental learning methods to describe the influence of sample ordering on the performance of a learned model. Different types of experiments evaluate these properties for two different datasets and two different incremental learning methods. We show how to find sequences of classes for training just based on the data to get always best possible error rates. This is based on the estimation of Bayes error bounds.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2009Role.pdf},
    }

  • K. M. Wurm, R. Kuemmerle, C. Stachniss, and W. Burgard, “Improving Robot Navigation in Structured Outdoor Environments by Identifying Vegetation from Laser Data,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2009.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2009,
    title = {Improving Robot Navigation in Structured Outdoor Environments by Identifying Vegetation from Laser Data},
    author = {Wurm, K. M. and Kuemmerle, R. and Stachniss, C. and Burgard, W.},
    booktitle = iros,
    year = {2009},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm09iros.pdf},
    }

  • M. Y. Yang, “Multiregion Level-set Segmentation of Synthetic Aperture Radar Images,” in IEEE International Conf. on Image Processing, Cairo, 2009, p. 1717–1720. doi:10.1109/ICIP.2009.5413378
    [BibTeX] [PDF]

    Due to the presence of speckle, segmentation of SAR images is generally acknowledged as a difficult problem. A large effort has been done in order to cope with the influence of speckle noise on image segmentation such as edge detection or direct global segmentation. Recent works address this problem by using statistical image representation and deformable models. We suggest a novel variational approach to SAR image segmentation, which consists of minimizing a functional containing an original observation term derived from maximum a posteriori (MAP) estimation framework and a Gamma image representation. The minimization is carried out efficiently by a new multiregion method which embeds a simple partition assumption directly in curve evolution to guarantee a partition of the image domain from an arbitrary initial partition. Experiments on both synthetic and real images show the effectiveness of the proposed method.

    @inproceedings{yang2009multiregion,
      author    = {Yang, Michael Ying},
      title     = {Multiregion Level-set Segmentation of Synthetic Aperture Radar Images},
      booktitle = {IEEE International Conf. on Image Processing},
      year      = {2009},
      address   = {Cairo},
      pages     = {1717--1720},
      doi       = {10.1109/ICIP.2009.5413378},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Yang2009Multiregion.pdf},
      abstract  = {Due to the presence of speckle, segmentation of SAR images is generally acknowledged as a difficult problem. A large effort has been done in order to cope with the influence of speckle noise on image segmentation such as edge detection or direct global segmentation. Recent works address this problem by using statistical image representation and deformable models. We suggest a novel variational approach to SAR image segmentation, which consists of minimizing a functional containing an original observation term derived from maximum a posteriori (MAP) estimation framework and a Gamma image representation. The minimization is carried out efficiently by a new multiregion method which embeds a simple partition assumption directly in curve evolution to guarantee a partition of the image domain from an arbitrary initial partition. Experiments on both synthetic and real images show the effectiveness of the proposed method.},
    }

  • Y. Yang, “Remote sensing image registration via active contour model,” International Journal of Electronics and Communications, vol. 65, p. 227–234, 2009. doi:10.1016/j.aeue.2008.01.003
    [BibTeX]

    Image registration is the process by which we determine a transformation that provides the most accurate match between two images. The search for the matching transformation can be automated with the use of a suitable metric, but it can be very time-consuming and tedious. In this paper, we introduce a registration algorithm that combines active contour segmentation together with mutual information. Our approach starts with a segmentation procedure. It is formed by a novel geometric active contour, which incorporates edge knowledge, namely Edgeflow, into active contour model. Two edgemap images filled with closed contours are obtained. After ruling out mismatched curves, we use mutual information (MI) as a similarity measure to register two edgemap images. Experimental results are provided to illustrate the performance of the proposed registration algorithm using both synthetic and multisensor images. Quantitative error analysis is also provided and several images are shown for subjective evaluation.

    @article{yang2009remote,
      author   = {Yang, Ying},
      title    = {Remote sensing image registration via active contour model},
      journal  = {International Journal of Electronics and Communications},
      year     = {2009},
      volume   = {65},
      pages    = {227--234},
      doi      = {10.1016/j.aeue.2008.01.003},
      abstract = {Image registration is the process by which we determine a transformation that provides the most accurate match between two images. The search for the matching transformation can be automated with the use of a suitable metric, but it can be very time-consuming and tedious. In this paper, we introduce a registration algorithm that combines active contour segmentation together with mutual information. Our approach starts with a segmentation procedure. It is formed by a novel geometric active contour, which incorporates edge knowledge, namely Edgeflow, into active contour model. Two edgemap images filled with closed contours are obtained. After ruling out mismatched curves, we use mutual information (MI) as a similarity measure to register two edgemap images. Experimental results are provided to illustrate the performance of the proposed registration algorithm using both synthetic and multisensor images. Quantitative error analysis is also provided and several images are shown for subjective evaluation.},
    }

2008

  • C. Beder and R. Steffen, “Incremental estimation without specifying a-priori covariance matrices for the novel parameters,” in VLMP Workshop on CVPR, Anchorage, USA, 2008. doi:10.1109/CVPRW.2008.4563139
    [BibTeX] [PDF]

    We will present a novel incremental algorithm for the task of online least-squares estimation. Our approach aims at combining the accuracy of least-squares estimation and the fast computation of recursive estimation techniques like the Kalman filter. Analyzing the structure of least-squares estimation we devise a novel incremental algorithm, which is able to introduce new unknown parameters and observations into an estimation simultaneously and is equivalent to the optimal overall estimation in case of linear models. It constitutes a direct generalization of the well-known Kalman filter allowing to augment the state vector inside the update step. In contrast to classical recursive estimation techniques no artificial initial covariance for the new unknown parameters is required here. We will show, how this new algorithm allows more flexible parameter estimation schemes especially in the case of scene and motion reconstruction from image sequences. Since optimality is not guaranteed in the non-linear case we will also compare our incremental estimation scheme to the optimal bundle adjustment on a real image sequence. It will be shown that competitive results are achievable using the proposed technique.

    @inproceedings{beder2008incremental,
      author    = {Beder, Christian and Steffen, Richard},
      title     = {Incremental estimation without specifying a-priori covariance matrices for the novel parameters},
      booktitle = {VLMP Workshop on CVPR},
      year      = {2008},
      address   = {Anchorage, USA},
      doi       = {10.1109/CVPRW.2008.4563139},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Beder2008Incremental.pdf},
      abstract  = {We will present a novel incremental algorithm for the task of online least-squares estimation. Our approach aims at combining the accuracy of least-squares estimation and the fast computation of recursive estimation techniques like the Kalman filter. Analyzing the structure of least-squares estimation we devise a novel incremental algorithm, which is able to introduce new unknown parameters and observations into an estimation simultaneously and is equivalent to the optimal overall estimation in case of linear models. It constitutes a direct generalization of the well-known Kalman filter allowing to augment the state vector inside the update step. In contrast to classical recursive estimation techniques no artificial initial covariance for the new unknown parameters is required here. We will show, how this new algorithm allows more flexible parameter estimation schemes especially in the case of scene and motion reconstruction from image sequences. Since optimality is not guaranteed in the non-linear case we will also compare our incremental estimation scheme to the optimal bundle adjustment on a real image sequence. It will be shown that competitive results are achievable using the proposed technique.},
    }

  • J. A. Benediktsson, X. Ceamanos Garcia, B. Waske, J. Chanussot, J. R. Sveinsson, and M. Fauvel, “Ensemble Methods for Classification of Hyperspectral Data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2008. doi:10.1109/IGARSS.2008.4778793
    [BibTeX]

    The classification of hyperspectral data is addressed using a classifier ensemble based on Support Vector Machines (SVM). First of all, the hyperspectral data set is decomposed into few sources according to the spectral bands correlation. Then, each source is treated separately and classified by an SVM classifier. Finally, all outputs are used as inputs for the final decision fusion, performed by an additional SVM classifier. The results of experiments, clearly show that the proposed SVM-based decision fusion outperforms a single SVM classifier in terms of overall accuracies.

    @InProceedings{benediktsson2008ensemble,
    title = {Ensemble Methods for Classification of Hyperspectral Data},
    author = {Benediktsson, Jon Atli and Ceamanos Garcia, X. and Waske, Bj{\"o}rn and Chanussot, J. and Sveinsson, J. R. and Fauvel, M.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2008},
    abstract = {The classification of hyperspectral data is addressed using a classifier ensemble based on Support Vector Machines (SVM). First of all, the hyperspectral data set is decomposed into few sources according to the spectral bands correlation. Then, each source is treated separately and classified by an SVM classifier. Finally, all outputs are used as inputs for the final decision fusion, performed by an additional SVM classifier. The results of experiments, clearly show that the proposed SVM-based decision fusion outperforms a single SVM classifier in terms of overall accuracies.},
    doi = {10.1109/IGARSS.2008.4778793},
    keywords = {Gaussian maximum likelihood method;SVM classifier;Support Vector Machines;decision fusion;ensemble classifier method;hyperspectral data classification;multisensor image classification;pattern recognition;spectral band correlation;geophysical techniques;geophysics computing;image classification;image processing;maximum likelihood estimation;pattern recognition;remote sensing;support vector machines;},
    timestamp = {2012.09.05},
    }

  • T. Dickscheid, T. Läbe, and W. Förstner, “Benchmarking Automatic Bundle Adjustment Results,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 7–12, Part B3a.
    [BibTeX] [PDF]

    In classical photogrammetry, point observations are manually determined by an operator for performing the bundle adjustment of a sequence of images. In such cases, a comparison of different estimates is usually carried out with respect to the estimated 3D object points. Today, a broad range of automatic methods are available for extracting and matching point features across images, even in the case of widely separated views and under strong deformations. This allows for fully automatic solutions to the relative orientation problem, and even to the bundle triangulation in case that manually measured control points are available. However, such systems often contain random subprocedures like RANSAC for eliminating wrong correspondences, yielding different 3D points but hopefully similar orientation parameters. This causes two problems for the evaluation: First, the randomness of the algorithm has an influence on its stability, and second, we are constrained to compare the orientation parameters instead of the 3D points. We propose a method for benchmarking automatic bundle adjustments which takes these constraints into account and uses the orientation parameters directly. Given sets of corresponding orientation parameters, we require our benchmark test to address their consistency of the form deviation and the internal precision and their precision level related to the precision of a reference data set. Besides comparing different bundle adjustment methods, the approach may be used to safely evaluate effects of feature operators, matching strategies, control parameters and other design decisions for a particular method. The goal of this paper is to derive appropriate measures to cover these aspects, describe a coherent benchmarking scheme and show the feasibility of the approach using real data.

    @InProceedings{dickscheid2008benchmarking,
    title = {Benchmarking Automatic Bundle Adjustment Results},
    author = {Dickscheid, Timo and L{\"a}be, Thomas and F{\"o}rstner, Wolfgang},
    booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2008},
    address = {Beijing, China},
    pages = {7--12, Part B3a},
    abstract = {In classical photogrammetry, point observations are manually determined by an operator for performing the bundle adjustment of a sequence of images. In such cases, a comparison of different estimates is usually carried out with respect to the estimated 3D object points. Today, a broad range of automatic methods are available for extracting and matching point features across images, even in the case of widely separated views and under strong deformations. This allows for fully automatic solutions to the relative orientation problem, and even to the bundle triangulation in case that manually measured control points are available. However, such systems often contain random subprocedures like RANSAC for eliminating wrong correspondences, yielding different 3D points but hopefully similar orientation parameters. This causes two problems for the evaluation: First, the randomness of the algorithm has an influence on its stability, and second, we are constrained to compare the orientation parameters instead of the 3D points. We propose a method for benchmarking automatic bundle adjustments which takes these constraints into account and uses the orientation parameters directly. Given sets of corresponding orientation parameters, we require our benchmark test to address their consistency of the form deviation and the internal precision and their precision level related to the precision of a reference data set. Besides comparing different bundle adjustment methods, the approach may be used to safely evaluate effects of feature operators, matching strategies, control parameters and other design decisions for a particular method. The goal of this paper is to derive appropriate measures to cover these aspects, describe a coherent benchmarking scheme and show the feasibility of the approach using real data.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Dickscheid2008Benchmarking.pdf},
    }

  • M. Drauschke, “Description of Stable Regions IPM,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-03, 2008.
    [BibTeX] [PDF]

    The Stable Regions Image Processing Module is a low-level region detector. It delivers image parts of interest without any further interpretation. These image parts are all regions of an image which do not change much over a certain range in scale space of the image. The output of this IPM is a list of polygons of any shape and their rectangular bounding boxes, which both are saved into an xml-file.

    @techreport{drauschke2008description,
      author      = {Drauschke, Martin},
      title       = {Description of Stable Regions IPM},
      institution = {Department of Photogrammetry, University of Bonn},
      number      = {TR-IGG-P-2008-03},
      month       = mar,
      year        = {2008},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Description.pdf},
      abstract    = {The Stable Regions Image Processing Module is a low-level region detector. It delivers image parts of interest without any further interpretation. These image parts are all regions of an image which do not change much over a certain range in scale space of the image. The output of this IPM is a list of polygons of any shape and their rectangular bounding boxes, which both are saved into an xml-file.},
    }

  • M. Drauschke, “Feature Subset Selection with Adaboost and ADTboost,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-04, 2008.
    [BibTeX] [PDF]

    This technical report presents feature subset selection methods for two boosting classification frameworks: Adaboost and ADTboost.

    @TechReport{drauschke2008feature,
    title = {Feature Subset Selection with Adaboost and ADTboost},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    month = mar,
    number = {TR-IGG-P-2008-04},
    abstract = {This technical report presents feature subset selection methods for two boosting classification frameworks: Adaboost and ADTboost.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Feature.pdf},
    }

  • M. Drauschke, “Multi-class ADTboost,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-06, 2008.
    [BibTeX] [PDF]

    This technical report gives a short review on boosting with alternating decision trees (ADTboost), which has been proposed by Freund & Mason (1999) and refined by De Comite et al. (2001). This approach is designed for two-class problems, and we extend it towards multi-class classification. The advantage of a multi-class boosting algorithm is its usage in scene interpretation with various kinds of objects. In these cases, two-class approaches will lead to several one class versus background (the other classes) classifications, where we must solve unappropriate results like “always background” or “two or more valid classes” for a sample.

    @TechReport{drauschke2008multi,
    title = {Multi-class ADTboost},
    author = {Drauschke, Martin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    month = aug,
    number = {TR-IGG-P-2008-06},
    abstract = {This technical report gives a short review on boosting with alternating decision trees (ADTboost), which has been proposed by Freund \& Mason (1999) and refined by De Comite et al. (2001). This approach is designed for two-class problems, and we extend it towards multi-class classification. The advantage of a multi-class boosting algorithm is its usage in scene interpretation with various kinds of objects. In these cases, two-class approaches will lead to several one class versus background (the other classes) classifications, where we must solve unappropriate results like "always background" or "two or more valid classes" for a sample.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Multi.pdf},
    }

  • M. Drauschke, “Verbesserung des Multi-Dodgings mittels bikubischer Interpolation,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-07, 2008.
    [BibTeX] [PDF]

    Aufgabenstellung: Digitalisierte 16-Bit-Luftbilder sollen automatisch verbessert werden. Dazu haben wir in (1) und (2) den Multi-Dodging-Ansatz vorgeschlagen. In diesem Verfahren wird ein Bild in sich nicht überlappende Ausschnitte (Patches) zerlegt. Dann wird in jedem dieser Bildausschnitte eine Histogrammverebnung durchgeführt. Da dieses Vorgehen die Patchgrenzen im verbesserten Bild hinterlässt, wurde abschließend zwischen den Patches bilinear interpoliert. In dieser Arbeit wird untersucht, ob die Verwendung einer bikubischen Interpolation an Stelle der bilinearen zu besseren Ergebnissen führt.

    @techreport{drauschke2008verbesserung,
      author      = {Drauschke, Martin},
      title       = {Verbesserung des Multi-Dodgings mittels bikubischer Interpolation},
      institution = {Department of Photogrammetry, University of Bonn},
      number      = {TR-IGG-P-2008-07},
      year        = {2008},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Verbesserung.pdf},
      abstract    = {Aufgabenstellung: Digitalisierte 16-Bit-Luftbilder sollen automatisch verbessert werden. Dazu haben wir in (1) und (2) den Multi-Dodging-Ansatz vorgeschlagen. In diesem Verfahren wird ein Bild in sich nicht \"uberlappende Ausschnitte (Patches) zerlegt. Dann wird in jedem dieser Bildausschnitte eine Histogrammverebnung durchgef\"uhrt. Da dieses Vorgehen die Patchgrenzen im verbesserten Bild hinterl\"asst, wurde abschlie{\ss}end zwischen den Patches bilinear interpoliert. In dieser Arbeit wird untersucht, ob die Verwendung einer bikubischen Interpolation an Stelle der bilinearen zu besseren Ergebnissen f\"uhrt.},
    }

  • M. Drauschke and W. Förstner, “Comparison of Adaboost and ADTboost for Feature Subset Selection,” in PRIS 2008, Barcelona, Spain, 2008, p. 113–122.
    [BibTeX] [PDF]

    This paper addresses the problem of feature selection within classification processes. We present a comparison of a feature subset selection with respect to two boosting methods, Adaboost and ADTboost. In our evaluation, we have focused on three different criteria: the classification error and the efficiency of the process depending on the number of most appropriate features and the number of training samples. Therefore, we discuss both techniques and sketch their functionality, where we restrict both boosting approaches to linear weak classifiers. We propose a feature subset selection method, which we evaluate on synthetic and on benchmark data sets.

    @InProceedings{drauschke2008comparison,
    title = {Comparison of Adaboost and ADTboost for Feature Subset Selection},
    author = {Drauschke, Martin and F{\"o}rstner, Wolfgang},
    booktitle = {PRIS 2008},
    year = {2008},
    address = {Barcelona, Spain},
    pages = {113--122},
    abstract = {This paper addresses the problem of feature selection within classification processes. We present a comparison of a feature subset selection with respect to two boosting methods, Adaboost and ADTboost. In our evaluation, we have focused on three different criteria: the classification error and the efficiency of the process depending on the number of most appropriate features and the number of training samples. Therefore, we discuss both techniques and sketch their functionality, where we restrict both boosting approaches to linear weak classifiers. We propose a feature subset selection method, which we evaluate on synthetic and on benchmark data sets.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Comparison.pdf},
    }

  • M. Drauschke and W. Förstner, “Selecting appropriate features for detecting buildings and building parts,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 447–452 Part B3b-1.
    [BibTeX] [PDF]

    The paper addresses the problem of feature selection during classification of image regions within the context of interpreting images showing highly structured objects such as buildings. We present a feature selection scheme that is connected with the classification framework Adaboost, cf. (Schapire and Singer, 1999). We constricted our weak learners on threshold classification on a single feature. Our experiments showed that the classification with Adaboost is based on relatively small subsets of features. Thus, we are able to find sets of appropriate features. We present our results on manually annotated and automatically segmented regions from facade images of the eTRIMS data base, where our focus were the object classes facade, roof, windows and window panes.

    @InProceedings{drauschke2008selecting,
    title = {Selecting appropriate features for detecting buildings and building parts},
    author = {Drauschke, Martin and F{\"o}rstner, Wolfgang},
    booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2008},
    address = {Beijing, China},
    pages = {447--452, Part B3b-1},
    abstract = {The paper addresses the problem of feature selection during classification of image regions within the context of interpreting images showing highly structured objects such as buildings. We present a feature selection scheme that is connected with the classification framework Adaboost, cf. (Schapire and Singer, 1999). We constricted our weak learners on threshold classification on a single feature. Our experiments showed that the classification with Adaboost is based on relatively small subsets of features. Thus, we are able to find sets of appropriate features. We present our results on manually annotated and automatically segmented regions from facade images of the eTRIMS data base, where our focus were the object classes facade, roof, windows and window panes.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2008Selecting.pdf},
    }

  • B. Frank, M. Becker, C. Stachniss, M. Teschner, and W. Burgard, “Learning Cost Functions for Mobile Robot Navigation in Environments with Deformable Objects,” Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2008,
    title = {Learning Cost Functions for Mobile Robot Navigation in Environments with Deformable Objects},
    author = {Frank, B. and Becker, M. and Stachniss, C. and Teschner, M. and Burgard, W.},
    booktitle = icrawsplanning,
    year = {2008},
    address = {Pasadena, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank08icraws.pdf},
    }

  • B. Frank, M. Becker, C. Stachniss, M. Teschner, and W. Burgard, “Efficient Path Planning for Mobile Robots in Environments with Deformable Objects,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{frank2008a,
    title = {Efficient Path Planning for Mobile Robots in Environments with Deformable Objects},
    author = {Frank, B. and Becker, M. and Stachniss, C. and Teschner, M. and Burgard, W.},
    booktitle = icra,
    year = {2008},
    address = {Pasadena, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/frank08icra.pdf},
    }

  • G. Grisetti, D. Lordi Rizzini, C. Stachniss, E. Olson, and W. Burgard, “Online Constraint Network Optimization for Efficient Maximum Likelihood Map Learning,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{grisetti2008,
    title = {Online Constraint Network Optimization for Efficient Maximum Likelihood Map Learning},
    author = {Grisetti, G. and Lordi Rizzini, D. and Stachniss, C. and Olson, E. and Burgard, W.},
    booktitle = icra,
    year = {2008},
    address = {Pasadena, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti08icra.pdf},
    }

  • L. Jensen, “Automatische Detektion von Bombentrichtern,” Bachelor Thesis, Institute of Photogrammetry, University of Bonn, 2008.
    [BibTeX] [PDF]

    Der Kampfmittelbeseitigungsdienst der Bezirksregierung Arnsberg nutzt Luftbilder aus dem Zweiten Weltkrieg zur Detektion von Blindgängern. Aufgrund der großen Anzahl an Bildern (über 300000) ist die Suche sehr aufwändig. Die Arbeit der Auswerter könnte erleichtert werden, wenn sie eine Karte hätten, auf der die Dichte der Bombardierung dargestellt ist. Um diese Karte zu erstellen, ist ein Verfahren notwendig, das die Bombentrichter auf den Bildern automatisch detektiert. Dieses wurde in der vorliegenden Bachelorarbeit realisiert. Da die Trichter sich in ihrer Gestalt und Größe stark unterscheiden, muss ein Ansatz zur Detektion gewählt werden, der mit diesen Variationen umgehen kann. Der Algorithmus führt eine Kandidatensuche mittels Kreuzkorrelation des Bildes mit einem repräsentativen Trichter-Template in verschiedenen Größen durch und klassifiziert die gefundenen Kandidaten anschließend. Die Klassifizierung erfolgt mit Hilfe der Wahrscheinlichkeitsdichte der Verteilungen der Klassen Trichter und Hintergrund. Um die Verteilungsparameter zu schätzen, ist die Dimensionsreduktion des Merkmalsraums der Trainingsdaten mit einer Hauptkomponentenanalyse (PCA) und einer linearen Diskriminanzanalyse nach Fisher (LDA) und anschließender Projektion in den Unterraum notwendig. In dieser Arbeit wurde das Verfahren mit einer Trichterklasse implementiert, es kann aber gut auf verschiedene Trichterklassen erweitert werden. Der Algorithmus zur Bombentrichterdetektion wurde in Matlab implementiert. Nach der Vorverarbeitung des Bildmaterials mussten zur Erstellung des Templates zunächst Trainingsbilder annotiert werden. Außerdem waren bei der Umsetzung verschiedene Parameter, wie z.B. die Templategrößen zur Kandidatensuche, die Dimension des PCA-Raums und die Bildausschnittsgröße bei der Klassifikation zu bestimmen. Zur Beurteilung der Ergebnisse wurde der Algorithmus auf den Trainingsbildern getestet und die Ergebnisse mit den Referenzdaten verglichen. 
Je nachdem ob vier oder fünf Templategrößen verwendet werden, können mit dem erstellten Template etwa 75% oder 80% der Trichter erfasst werden. Nach der Klassifikation werden mit dem implementierten Algorithmus je nach Konfiguration zwischen 70% und 64% der Trichter detektiert, dabei ist die Relevanz allerdings sehr gering. Maximal sind etwa 31% der als Trichter klassifizierten Bildausschnitte auch tatsächlich Bombentrichter. Bei der Analyse der false positives auf Testbildern ergab sich, dass bestimmte Bildstrukturen, wie Hausdächer, Schattenwurf an Straßen, Texturen in Feldern oder Waldstrukturen immer wieder fälschlicherweise als Trichter klassifiziert werden. Bei der Untersuchung der nicht detektierten Bombentrichter konnten Trichterklassen abgeleitet werden, die mit dem erstellten Template nicht detektiert werden. Mit den Testbildern wurde außerdem die Möglichkeit untersucht, die Bilder mit Hilfe der Bombentrichterdetektion in die Kategorien schwache, mittlere und starke Bombardierung einzuordnen. Hierbei wurden 73% der Bilder der richtigen Kategorie zugeordnet. Bei einer Steigerung der Relevanz und der Annotation weiterer Testbilder ist eine bessere Einordnung zu erwarten. Insgesamt liegt mit dieser Arbeit ein vielversprechender Ansatz zur Bombentrichterdetektion mit großer Erweiterungsmöglichkeit vor.

    @mastersthesis{jensen2008automatische,
    author   = {Jensen, Laura},
    title    = {Automatische Detektion von Bombentrichtern},
    school   = {Institute of Photogrammetry, University of Bonn},
    type     = {Bachelor Thesis},
    year     = {2008},
    note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Martin Drauschke},
    abstract = {Der Kampfmittelbeseitigungsdienst der Bezirksregierung Arnsberg nutzt Luftbilder aus dem Zweiten Weltkrieg zur Detektion von Blindg\"angern. Aufgrund der gro{\ss}en Anzahl an Bildern (\"uber 300000) ist die Suche sehr aufw\"andig. Die Arbeit der Auswerter k\"onnte erleichtert werden, wenn sie eine Karte h\"atten, auf der die Dichte der Bombardierung dargestellt ist. Um diese Karte zu erstellen, ist ein Verfahren notwendig, das die Bombentrichter auf den Bildern automatisch detektiert. Dieses wurde in der vorliegenden Bachelorarbeit realisiert. Da die Trichter sich in ihrer Gestalt und Gr\"o{\ss}e stark unterscheiden, muss ein Ansatz zur Detektion gew\"ahlt werden, der mit diesen Variationen umgehen kann. Der Algorithmus f\"uhrt eine Kandidatensuche mittels Kreuzkorrelation des Bildes mit einem repr\"asentativen Trichter-Template in verschiedenen Gr\"o{\ss}en durch und klassifiziert die gefundenen Kandidaten anschlie{\ss}end. Die Klassifizierung erfolgt mit Hilfe der Wahrscheinlichkeitsdichte der Verteilungen der Klassen Trichter und Hintergrund. Um die Verteilungsparameter zu sch\"atzen, ist die Dimensionsreduktion des Merkmalsraums der Trainingsdaten mit einer Hauptkomponentenanalyse (PCA) und einer linearen Diskriminanzanalyse nach Fisher (LDA) und anschlie{\ss}ender Projektion in den Unterraum notwendig. In dieser Arbeit wurde das Verfahren mit einer Trichterklasse implementiert, es kann aber gut auf verschiedene Trichterklassen erweitert werden. Der Algorithmus zur Bombentrichterdetektion wurde in Matlab implementiert. Nach der Vorverarbeitung des Bildmaterials mussten zur Erstellung des Templates zun\"achst Trainingsbilder annotiert werden. Au{\ss}erdem waren bei der Umsetzung verschiedene Parameter, wie z.B. die Templategr\"o{\ss}en zur Kandidatensuche, die Dimension des PCA-Raums und die Bildausschnittsgr\"o{\ss}e bei der Klassifikation zu bestimmen. 
Zur Beurteilung der Ergebnisse wurde der Algorithmus auf den Trainingsbildern getestet und die Ergebnisse mit den Referenzdaten verglichen. Je nachdem ob vier oder f\"unf Templategr\"o{\ss}en verwendet werden, k\"onnen mit dem erstellten Template etwa 75% oder 80% der Trichter erfasst werden. Nach der Klassifikation werden mit dem implementierten Algorithmus je nach Konfiguration zwischen 70% und 64% der Trichter detektiert, dabei ist die Relevanz allerdings sehr gering. Maximal sind etwa 31% der als Trichter klassifizierten Bildausschnitte auch tats\"achlich Bombentrichter. Bei der Analyse der false positives auf Testbildern ergab sich, dass bestimmte Bildstrukturen, wie Hausd\"acher, Schattenwurf an Stra{\ss}en, Texturen in Feldern oder Waldstrukturen immer wieder f\"alschlicherweise als Trichter klassifiziert werden. Bei der Untersuchung der nicht detektierten Bombentrichter konnten Trichterklassen abgeleitet werden, die mit dem erstellten Template nicht detektiert werden. Mit den Testbildern wurde au{\ss}erdem die M\"oglichkeit untersucht, die Bilder mit Hilfe der
    Bombentrichterdetektion in die Kategorien schwache, mittlere und starke Bombardierung einzuordnen. Hierbei wurden 73% der Bilder der richtigen Kategorie zugeordnet. Bei einer Steigerung der Relevanz und der Annotation weiterer Testbilder ist eine bessere Einordnung zu erwarten. Insgesamt liegt mit dieser Arbeit ein vielversprechender Ansatz zur Bombentrichterdetektion mit gro{\ss}er Erweiterungsm\"oglichkeit vor.},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Jensen2008Automatische.pdf},
    }

  • L. Jensen, “Schattenentfernung aus Farbbildern mit dem Retinex-Algorithmus,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-01, 2008.
    [BibTeX] [PDF]
    [none]
    @techreport{jensen2008schattenentfernung,
    author      = {Jensen, Laura},
    title       = {Schattenentfernung aus Farbbildern mit dem Retinex-Algorithmus},
    institution = {Department of Photogrammetry, University of Bonn},
    number      = {TR-IGG-P-2008-01},
    year        = {2008},
    abstract    = {[none]},
    url         = {https://www.ipb.uni-bonn.de/pdfs/Jensen2008Schattenentfernung.pdf},
    }

  • F. Korč and W. Förstner, “Approximate Parameter Learning in Conditional Random Fields: An Empirical Investigation,” in 30th Annual Symposium of the German Association for Pattern Recognition (DAGM), Munich, Germany, 2008, p. 11–20. doi:10.1007/978-3-540-69321-5_2
    [BibTeX] [PDF]

    We investigate maximum likelihood parameter learning in Conditional Random Fields (CRF) and present an empirical study of pseudo-likelihood (PL) based approximations of the parameter likelihood gradient. We show that these parameter learning methods can be improved and evaluate the resulting performance employing different inference techniques. We show that the approximation based on penalized pseudo-likelihood (PPL) in combination with the Maximum A Posteriori (MAP) inference yields results comparable to other state of the art approaches, while providing low complexity and advantages to formulating parameter learning as a convex optimization problem. Eventually, we demonstrate applicability on the task of detecting man-made structures in natural images.

    @inproceedings{korvc2008approximate,
    author    = {Kor{\vc}, Filip and F\"orstner, Wolfgang},
    title     = {Approximate Parameter Learning in Conditional Random Fields: An Empirical Investigation},
    booktitle = {30th Annual Symposium of the German Association for Pattern Recognition (DAGM)},
    editor    = {G. Rigoll},
    series    = {LNCS},
    number    = {5096},
    pages     = {11--20},
    publisher = {Springer},
    address   = {Munich, Germany},
    year      = {2008},
    abstract  = {We investigate maximum likelihood parameter learning in Conditional Random Fields (CRF) and present an empirical study of pseudo-likelihood (PL) based approximations of the parameter likelihood gradient. We show that these parameter learning methods can be improved and evaluate the resulting performance employing different inference techniques. We show that the approximation based on penalized pseudo-likelihood (PPL) in combination with the Maximum A Posteriori (MAP) inference yields results comparable to other state of the art approaches, while providing low complexity and advantages to formulating parameter learning as a convex optimization problem. Eventually, we demonstrate applicability on the task of detecting man-made structures in natural images.},
    doi       = {10.1007/978-3-540-69321-5_2},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Korvc2008Approximate.pdf},
    }

  • F. Korč and W. Förstner, “Finding Optimal Non-Overlapping Subset of Extracted Image Objects,” in Proc. of the 12th International Workshop on Combinatorial Image Analysis (IWCIA), Buffalo, USA, 2008.
    [BibTeX] [PDF]

    We present a solution to the following discrete optimization problem. Given a set of independent, possibly overlapping image regions and a non-negative likeliness of the individual regions, we select a non-overlapping subset that is optimal with respect to the following requirements: First, every region is either part of the solution or has an overlap with it. Second, the degree of overlap of the solution with the rest of the regions is maximized together with the likeliness of the solution. Third, the likeliness of the individual regions influences the overall solution proportionally to the degree of overlap with neighboring regions. We represent the problem as a graph and solve the task by reduction to a constrained binary integer programming problem. The problem involves minimizing a linear objective function subject to linear inequality constraints. Both the objective function and the constraints exploit the structure of the graph. We illustrate the validity and the relevance of the proposed formulation by applying the method to the problem of facade window extraction. We generalize our formulation to the case where a set of hypotheses is given together with a binary similarity relation and similarity measure. Our formulation then exploits combination of degree and structure of hypothesis similarity and likeliness of individual hypotheses. In this case, we present a solution with non-similar hypotheses which can be viewed as a non-redundant representation.

    @inproceedings{korvc2008finding,
    author    = {Kor{\vc}, Filip and F\"orstner, Wolfgang},
    title     = {Finding Optimal Non-Overlapping Subset of Extracted Image Objects},
    booktitle = {Proc. of the 12th International Workshop on Combinatorial Image Analysis (IWCIA)},
    address   = {Buffalo, USA},
    year      = {2008},
    abstract  = {We present a solution to the following discrete optimization problem. Given a set of independent, possibly overlapping image regions and a non-negative likeliness of the individual regions, we select a non-overlapping subset that is optimal with respect to the following requirements: First, every region is either part of the solution or has an overlap with it. Second, the degree of overlap of the solution with the rest of the regions is maximized together with the likeliness of the solution. Third, the likeliness of the individual regions influences the overall solution proportionally to the degree of overlap with neighboring regions. We represent the problem as a graph and solve the task by reduction to a constrained binary integer programming problem. The problem involves minimizing a linear objective function subject to linear inequality constraints. Both the objective function and the constraints exploit the structure of the graph. We illustrate the validity and the relevance of the proposed formulation by applying the method to the problem of facade window extraction. We generalize our formulation to the case where a set of hypotheses is given together with a binary similarity relation and similarity measure. Our formulation then exploits combination of degree and structure of hypothesis similarity and likeliness of individual hypotheses. In this case, we present a solution with non-similar hypotheses which can be viewed as a non-redundant representation.},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Korvc2008Finding.pdf},
    }

  • F. Korč and W. Förstner, “Interpreting Terrestrial Images of Urban Scenes Using Discriminative Random Fields,” in Proc. of the 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 291–296 Part B3a.
    [BibTeX] [PDF]

    We investigate Discriminative Random Fields (DRF) which provide a principled approach for combining local discriminative classifiers that allow the use of arbitrary overlapping features, with adaptive data-dependent smoothing over the label field. We discuss the differences between a traditional Markov Random Field (MRF) formulation and the DRF model, and compare the performance of the two models and an independent sitewise classifier. Further, we present results suggesting the potential for performance enhancement by improving state of the art parameter learning methods. Eventually, we demonstrate the application feasibility on both synthetic and natural images.

    @InProceedings{korvc2008interpreting,
    title = {Interpreting Terrestrial Images of Urban Scenes Using Discriminative Random Fields},
    author = {Kor{\vc}, Filip and F\"orstner, Wolfgang},
    booktitle = {Proc. of the 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2008},
    address = {Beijing, China},
    pages = {291--296},
    note = {Part B3a},
    abstract = {We investigate Discriminative Random Fields (DRF) which provide a principled approach for combining local discriminative classifiers that allow the use of arbitrary overlapping features, with adaptive data-dependent smoothing over the label field. We discuss the differences between a traditional Markov Random Field (MRF) formulation and the DRF model, and compare the performance of the two models and an independent sitewise classifier. Further, we present results suggesting the potential for performance enhancement by improving state of the art parameter learning methods. Eventually, we demonstrate the application feasibility on both synthetic and natural images.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Korvc2008Interpreting.pdf},
    }

  • H. Kretzschmar, C. Stachniss, C. Plagemann, and W. Burgard, “Estimating Landmark Locations from Geo-Referenced Photographs,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Nice, France, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{kretzschmar2008,
    title = {Estimating Landmark Locations from Geo-Referenced Photographs},
    author = {Kretzschmar, H. and Stachniss, C. and Plagemann, C. and Burgard, W.},
    booktitle = iros,
    year = {2008},
    address = {Nice, France},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/kretzschmar08iros.pdf},
    }

  • T. Läbe, T. Dickscheid, and W. Förstner, “On the Quality of Automatic Relative Orientation Procedures,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 37–42 Part B3b-1.
    [BibTeX] [PDF]

    This paper presents an empirical investigation into the quality of automatic relative orientation procedures. The results of an in-house developed automatic orientation software called aurelo (Laebe and Foerstner, 2006) are evaluated. For this evaluation a recently proposed consistency measure for two sets of orientation parameters (Dickscheid et. al., 2008) and the ratio of two covariances matrices is used. Thus we evaluate the consistency of bundle block adjustments and the precision level achievable. We use different sets of orientation results related to the same set of images but computed under differing conditions. As reference datasets results on a much higher image resolution and ground truth data from artificial images rendered with computer graphics software are used. Six different effects are analysed: varying results due to random procedures in aurelo, computations on different image pyramid levels and with or without points with only two or three observations, the effect of replacing the used SIFT operator with an approximation of SIFT features, called SURF, repetitive patterns in the scene and remaining non-linear distortions. These experiments show under which conditions the bundle adjustment results reflect the true errors and thus give valuable hints for the use of automatic relative orientation procedures and possible improvements of the software.

    @InProceedings{labe2008quality,
    title = {On the Quality of Automatic Relative Orientation Procedures},
    author = {L\"abe, Thomas and Dickscheid, Timo and F\"orstner, Wolfgang},
    booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2008},
    address = {Beijing, China},
    pages = {37--42},
    note = {Part B3b-1},
    abstract = {This paper presents an empirical investigation into the quality of automatic relative orientation procedures. The results of an in-house developed automatic orientation software called aurelo (Laebe and Foerstner, 2006) are evaluated. For this evaluation a recently proposed consistency measure for two sets of orientation parameters (Dickscheid et. al., 2008) and the ratio of two covariances matrices is used. Thus we evaluate the consistency of bundle block adjustments and the precision level achievable. We use different sets of orientation results related to the same set of images but computed under differing conditions. As reference datasets results on a much higher image resolution and ground truth data from artificial images rendered with computer graphics software are used. Six different effects are analysed: varying results due to random procedures in aurelo, computations on different image pyramid levels and with or without points with only two or three observations, the effect of replacing the used SIFT operator with an approximation of SIFT features, called SURF, repetitive patterns in the scene and remaining non-linear distortions. These experiments show under which conditions the bundle adjustment results reflect the true errors and thus give valuable hints for the use of automatic relative orientation procedures and possible improvements of the software.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Labe2008Quality.pdf},
    }

  • J. Müller, C. Stachniss, K. O. Arras, and W. Burgard, “Socially Inspired Motion Planning for Mobile Robots in Populated Environments,” in Intl. Conf. on Cognitive Systems (CogSys), Baden Baden, Germany, 2008.
    [BibTeX]
    [none]
    @inproceedings{muller2008,
    author    = {M\"uller, J. and Stachniss, C. and Arras, K.O. and Burgard, W.},
    title     = {Socially Inspired Motion Planning for Mobile Robots in Populated Environments},
    booktitle = cogsys,
    address   = {Baden Baden, Germany},
    year      = {2008},
    abstract  = {[none]},
    timestamp = {2014.04.24},
    }

  • M. Muffert, “Durchführung von Untersuchungen zur Bewertung der Messqualität eines Faro-Messarms des Typs “Titanium”,” Bachelor Thesis, 2008.
    [BibTeX]

    In the following study we carry out initial research on the FaroArm Titanium. The results allow conclusions regarding the accuracy and reliability of measurement depending on the measurement position. In order to draw conclusions about the accuracy we have developed and applied several measuring techniques. The FaroArm Titanium (FaroArm) is a mobile precision measurement arm which can be described as an industrial robot. It has particular use in quality control and, in mechanical engineering in what is known as Reverse Engineering. In accordance with company specifications, single point accuracies of 0.05mm are achieved. Mobile precision measurement arms consist of links of various lengths which are usually connected by six or seven revolute joints. The number of the axes of rotation gives the number of degrees of freedom of the measurement arm. Company specifications regarding the lengths of the axes do not lie within the required range of accuracy. The orientation of the revolute joints is unknown. In this study we deal with fundamental mathematical and statistical procedures for spatial orientation. The Denavit-Hartenberg-Convention is of particular importance in robotic forward kinematics. The best estimation of spatial orientation is crucial in the measurements to be taken. The main part of this study deals with the development and application of measurement concepts which will result in the first information about the accuracy of measurement of the FaroArm. First we modelled the forward kinematics of the robot by means of rough estimates of the axis length. A direct comparison between the self chosen coordinates and the nominal coordinates is impossible. For results of the accuracy of measurement, we compared the scatter plots of the FaroArm with reference scatter plots of the Lasertracker Smart 310 from the Leica Company. An exact definition of the points is therefore required, which we have achieved by centring an aluminium plate. 
The plate has metal cylinders in which cones are inserted. The cones serve to define the points. The different centring positions on the board were measured with both measuring systems. For both systems the spatial transformation between different plate positions were determined and compared. In this way we obtained our own information about measuring quality and accuracy of the FaroArm. The comparison between the two different measuring systems reveals gross errors in the observations. These can be attributed to incorrect operation or uncertainty in the centring of the measuring plate. A direct outlier control has to be taken after every measurement.

    @MastersThesis{muffert2008durchfuhrung,
    title = {Durchf\"uhrung von Untersuchungen zur Bewertung der Messqualit\"at eines Faro-Messarms des Typs ``Titanium''},
    author = {Muffert, Maximilian},
    school = {University of Bonn In Zusammenarbeit mit dem Lehrstuhl f\"ur Geod\"asie des IGG},
    year = {2008},
    note = {Betreuung: Dr.-Ing. Wolfgang Schauerte, Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Bachelor Thesis},
    abstract = {In the following study we carry out initial research on the FaroArm Titanium. The results allow conclusions regarding the accuracy and reliability of measurement depending on the measurement position. In order to draw conclusions about the accuracy we have developed and applied several measuring techniques. The FaroArm Titanium (FaroArm) is a mobile precision measurement arm which can be described as an industrial robot. It has particular use in quality control and, in mechanical engineering in what is known as Reverse Engineering. In accordance with company specifications, single point accuracies of 0.05mm are achieved. Mobile precision measurement arms consist of links of various lengths which are usually connected by six or seven revolute joints. The number of the axes of rotation gives the number of degrees of freedom of the measurement arm. Company specifications regarding the lengths of the axes do not lie within the required range of accuracy. The orientation of the revolute joints is unknown. In this study we deal with fundamental mathematical and statistical procedures for spatial orientation. The Denavit-Hartenberg-Convention is of particular importance in robotic forward kinematics. The best estimation of spatial orientation is crucial in the measurements to be taken. The main part of this study deals with the development and application of measurement concepts which will result in the first information about the accuracy of measurement of the FaroArm. First we modelled the forward kinematics of the robot by means of rough estimates of the axis length. A direct comparison between the self chosen coordinates and the nominal coordinates is impossible. For results of the accuracy of measurement, we compared the scatter plots of the FaroArm with reference scatter plots of the Lasertracker Smart 310 from the Leica Company. An exact definition of the points is therefore required, which we have achieved by centring an aluminium plate. 
The plate has metal cylinders in which cones are inserted. The cones serve to define the points. The different centring positions on the board were measured with both measuring systems. For both systems the spatial transformation between different plate positions were determined and compared. In this way we obtained our own information about measuring quality and accuracy of the FaroArm. The comparison between the two different measuring systems reveals gross errors in the observations. These can be attributed to incorrect operation or uncertainty in the centring of the measuring plate. A direct outlier control has to be taken after every measurement.},
    }

  • P. Pfaff, C. Stachniss, C. Plagemann, and W. Burgard, “Efficiently Learning High-dimensional Observation Models for Monte-Carlo Localization using Gaussian Mixtures,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Nice, France, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{pfaff2008,
    title = {Efficiently Learning High-dimensional Observation Models for Monte-Carlo Localization using Gaussian Mixtures},
    author = {Pfaff, P. and Stachniss, C. and Plagemann, C. and Burgard, W.},
    booktitle = iros,
    year = {2008},
    address = {Nice, France},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/pfaff08iros.pdf},
    }

  • C. Plagemann, F. Endres, J. Hess, C. Stachniss, and W. Burgard, “Monocular Range Sensing: A Non-Parametric Learning Approach,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @inproceedings{plagemann2008,
    author    = {Plagemann, C. and Endres, F. and Hess, J. and Stachniss, C. and Burgard, W.},
    title     = {Monocular Range Sensing: A Non-Parametric Learning Approach},
    booktitle = icra,
    address   = {Pasadena, CA, USA},
    year      = {2008},
    abstract  = {[none]},
    timestamp = {2014.04.24},
    url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/plagemann08icra.pdf},
    }

  • R. Roscher, “Bestimmung von 3D-Merkmalen von Bildregionen aus Stereobildern,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-05, 2008.
    [BibTeX] [PDF]

    Dieser Report erläutert die Bestimmung von 3D-Merkmalen von Bildregionen durch Zuordnung von Bildpunkten in diesen Regionen zu den Objektpunkten in einer Punktwolke. Die Umsetzung erfolgt in einer grafischen Benutzeroberfläche in Matlab, deren Bedienung in diesem Report veranschaulicht werden soll.

    @TechReport{roscher2008bestimmung,
    title = {Bestimmung von 3D-Merkmalen von Bildregionen aus Stereobildern},
    author = {Roscher, Ribana},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2008},
    number = {TR-IGG-P-2008-05},
    abstract = {Dieser Report erl\"autert die Bestimmung von 3D-Merkmalen von Bildregionen durch Zuordnung von Bildpunkten in diesen Regionen zu den Objektpunkten in einer Punktwolke. Die Umsetzung erfolgt in einer grafischen Benutzeroberfl\"ache in Matlab, deren Bedienung in diesem Report veranschaulicht werden soll.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Roscher2008Bestimmung.pdf},
    }

  • R. Roscher, “Lernen linearer probabilistischer diskriminativer Modelle für die semantische Bildsegmentierung,” Diploma Thesis, 2008.
    [BibTeX] [PDF]
    [none]
    @mastersthesis{roscher2008lernen,
    author   = {Roscher, Ribana},
    title    = {Lernen linearer probabilistischer diskriminativer Modelle f\"ur die semantische Bildsegmentierung},
    school   = {Institute of Photogrammetry, University of Bonn},
    type     = {Diploma Thesis},
    year     = {2008},
    note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Ing. Filip Kor{\vc}},
    abstract = {[none]},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Roscher2008Lernen.pdf},
    }

  • B. Schmeing, “Analyse des Bewegungsmusters von Objekten,” Bachelor Thesis, 2008.
    [BibTeX]

    Die vorliegende Arbeit beschäftigt sich mit der Analyse der Bewegungsmuster von Fußgängern. Wir entwickeln Methoden zur Erfassung, Modellierung und Klassifikation der Bewegung und untersuchen ihre Eignung an realen Daten. Für das implementierte Klassifikationsverfahren wählten wir einen diskriminativen Ansatz, d.h. Ziel der Klassifikation ist einzig die Unterscheidung verschiedener Bewegungsmuster. Auf die Realisierung eines generativen Ansatzes, der auch die Erzeugung synthetischer Bewegungsmuster erlaubt, wurde verzichtet. Der Algorithmus soll die Bewegungsmuster “Gehen”, “Hinken” und “Laufen” anhand von Merkmalen unterscheiden, die aus der Eigenbewegung mehrerer Probanden abgeleitet sind. Als Datengrundlage dienen mittels einer fest an der Brust angebrachten Kamera aufgenommene Rotationszeitreihen der Eigenbewegung der Probanden. Für jedes Bewegungsmuster stehen ca. 200 Videosequenzen von 5 Sekunden Länge (je 150 Bilder) zur Verfügung; die Rotationszeitreihen werden aus den Rotationen zwischen aufeinanderfolgenden Bildern erzeugt. Als Merkmalsvektoren für die Klassifikation dienen die Leistungsspektren der Rotationszeitreihen. Die Klassifikation basiert auf Fisher’s Linearer Diskriminante. Dabei werden in der Trainingsphase die Merkmalsvektoren von 378 Bildfolgen bearbeitet. Im ersten Schritt findet eine Projektion in den Entscheidungsraum mittels Linearer Diskriminanzanalyse (LDA) statt. Die Projektion ist so gewählt, dass sich die Klassen im Entscheidungsraum maximal unterscheiden. Im zweiten Schritt wird die Verteilung der projizierten Datenpunkte für jede Klasse bestimmt. Eine zuvor durchgeführte Dimensionsreduktion mittels Hauptkompentenanalyse (PCA) reduziert die Dimension des Klassifikationsproblems und verbessert so die Numerik bei der LDA. Nun können weitere Daten in den Entscheidungsraum projiziert und klassifiziert werden. Dabei wird jeder Datenpunkt der Klasse zugeordnet, bei der die Mahalonobis-Distanz zum Mittelpunkt der Klasse minimal ist. 
Sowohl die Aufnahme der Bewegung als auch die Bestimmung der Rotationszeitreihen funktioniert unter den bei der Bachelorarbeit vorliegenden Bedingungen zuverlässig. Es kam allerdings bei der Bewegungsart “Laufen” bei 8 aus 200 Bildfolgen zu Fehlern bei der Rotationsbestimmung; diese Bildfolgen wurden aus der Datenmenge ausgeschieden. Als Ergebnis der Trainingsphase ergaben sich neben der Projektionsmatrix in den Entscheidungsraum die Verteilungen der projizierten Trainingsdaten. Die Klassen sind im Entscheidungsraum gut unterscheidbar. Lediglich zwischen den Klassen “Gehen” und “Hinken” existieren Ausreißer, die nahe am Mittelwert der jeweils anderen Klasse liegen. Der implementierte Algorithmus ist in der Lage, die einzelnen Bewegungsmuster zuverlässig zu entscheiden. Von 201 Testdatensätzen konnten 199 korrekt zugeordnet werden. Die Fehlzuordnungen traten zwischen den Klassen “Gehen” und ”Hinken” auf. Der zur Klassifikation der Bewegungsmuster verwendete Ansatz lässt sich gut auf die Analyse weiterer Bewegungsmuster ausweiten.

    @MastersThesis{schmeing2008analyse,
    title = {Analyse des Bewegungsmusters von Objekten},
    author = {Schmeing, Benno},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2008},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
    type = {Bachelor Thesis},
    abstract = {Die vorliegende Arbeit besch\"aftigt sich mit der Analyse der Bewegungsmuster von Fu{\ss}g\"angern. Wir entwickeln Methoden zur Erfassung, Modellierung und Klassifikation der Bewegung und untersuchen ihre Eignung an realen Daten. F\"ur das implementierte Klassifikationsverfahren w\"ahlten wir einen diskriminativen Ansatz, d.h. Ziel der Klassifikation ist einzig die Unterscheidung verschiedener Bewegungsmuster. Auf die Realisierung eines generativen Ansatzes, der auch die Erzeugung synthetischer Bewegungsmuster erlaubt, wurde verzichtet. Der Algorithmus soll die Bewegungsmuster "Gehen", "Hinken" und "Laufen" anhand von Merkmalen unterscheiden, die aus der Eigenbewegung mehrerer Probanden abgeleitet sind. Als Datengrundlage dienen mittels einer fest an der Brust angebrachten Kamera aufgenommene Rotationszeitreihen der Eigenbewegung der Probanden. F\"ur jedes Bewegungsmuster stehen ca. 200 Videosequenzen von 5 Sekunden L\"ange (je 150 Bilder) zur Verf\"ugung; die Rotationszeitreihen werden aus den Rotationen zwischen aufeinanderfolgenden Bildern erzeugt. Als Merkmalsvektoren f\"ur die Klassifikation dienen die Leistungsspektren der Rotationszeitreihen. Die Klassifikation basiert auf Fisher's Linearer Diskriminante. Dabei werden in der Trainingsphase die Merkmalsvektoren von 378 Bildfolgen bearbeitet. Im ersten Schritt findet eine Projektion in den Entscheidungsraum mittels Linearer Diskriminanzanalyse (LDA) statt. Die Projektion ist so gew\"ahlt, dass sich die Klassen im Entscheidungsraum maximal unterscheiden. Im zweiten Schritt wird die Verteilung der projizierten Datenpunkte f\"ur jede Klasse bestimmt. Eine zuvor durchgef\"uhrte Dimensionsreduktion mittels Hauptkompentenanalyse (PCA) reduziert die Dimension des Klassifikationsproblems und verbessert so die Numerik bei der LDA. Nun k\"onnen weitere Daten in den Entscheidungsraum projiziert und klassifiziert werden. 
Dabei wird jeder Datenpunkt der Klasse zugeordnet, bei der die Mahalonobis-Distanz zum Mittelpunkt der Klasse minimal ist. Sowohl die Aufnahme der Bewegung als auch die Bestimmung der Rotationszeitreihen funktioniert unter den bei der Bachelorarbeit vorliegenden Bedingungen zuverl\"assig. Es kam allerdings bei der Bewegungsart "Laufen" bei 8 aus 200 Bildfolgen zu Fehlern bei der Rotationsbestimmung; diese Bildfolgen wurden aus der Datenmenge ausgeschieden. Als Ergebnis der Trainingsphase ergaben sich neben der Projektionsmatrix in den Entscheidungsraum die Verteilungen der projizierten Trainingsdaten. Die Klassen sind im Entscheidungsraum gut unterscheidbar. Lediglich zwischen den Klassen "Gehen" und "Hinken" existieren Ausrei{\ss}er, die nahe am Mittelwert der jeweils anderen Klasse liegen. Der implementierte Algorithmus ist in der Lage, die einzelnen Bewegungsmuster zuverl\"assig zu entscheiden. Von 201 Testdatens\"atzen konnten 199 korrekt zugeordnet werden. Die Fehlzuordnungen traten zwischen den Klassen "Gehen" und "Hinken" auf. Der zur Klassifikation der
    Bewegungsmuster verwendete Ansatz l\"asst sich gut auf die Analyse weiterer Bewegungsmuster ausweiten.},
    }

  • J. Siegemund, “Trajektorienrekonstruktion von bewegten Objekten aus Stereobildfolgen,” Diploma Thesis, 2008.
    [BibTeX] [PDF]

    Die vorliegende Arbeit beschäftigt sich mit der Rekonstruktion der räumlichen Trajektorienparameter bewegter Objekte anhand von kalibrierten Stereobildsequenzen. Zur Lösung dieses Problems wird ein Verfahren auf der Grundlage eines robusten Ausgleichungsmodells eingeführt. Als Eingabedaten dienen vorsegmentierte Bildpunkte des Objektes mit bekannter stereoskopischer und temporaler Zuordnung. Auf Basis dieser Bildinformation wird zusätzlich zu den Trajektorienparametern eine dreidimensionale Punktwolke in einem lokalen Objektsystem geschätzt, welche Hinweise auf Form und Ausmaße des beobachteten Objektes liefert. Darüber hinaus werden Techniken zur Steigerung der Effizienz und Robustheit des Verfahrens vorgestellt und es wird erläutert, wie mögliches Vorwissen in den Ausgleichungsprozess eingebracht werden kann. Der Anwendungsfokus in Beispielen und Ergebnissen liegt auf der Bestimmung der Trajektorien von Fremdfahrzeugen mittels Eigenfahrzeugsensorik zum Zwecke der Kollisionsvermeidung. Diese Informationen sind für Fahrassistenzsysteme von großer Bedeutung und für die Daimler AG als Kooperationspartner dieser Arbeit von besonderem Interesse. Das Verfahren selbst wird jedoch auf kein spezielles Anwendungsgebiet beschränkt. Anhand von Experimenten auf simulierten Szenen wird ein systematischer Fehler in den geschätzten Objektpositionen beobachtet. Das Auftreten dieses Fehlers wird motiviert und Methoden zur Behebung werden vorgestellt. Weiterhin zeigen Experimente auf realen Aufnahmen die Notwendigkeit einer zeitlichen Glättung der geschätzten Trajektorienparameter. Aus diesem Grund wird eine adaptive Glättungsmethode eingeführt, deren Strenge darüber hinaus anwendungsbezogen gesteuert werden kann. 
Die Ergebnisse zeigen, dass das Verfahren, trotz hoher Ausreißeranteile in den Eingabedaten, im Stande ist, die Bewegungstrajektorie eines Objektes mit hoher Genauigkeit und Robustheit zu bestimmen und gleichzeitig die dreidimensionale Form des beobachteten Objektes zu rekonstruieren.

    @MastersThesis{siegemund2008trajektorienrekonstruktion,
    title = {Trajektorienrekonstruktion von bewegten Objekten aus Stereobildfolgen},
    author = {Siegemund, Jan},
    school = {University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {2008},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Daniel Cremers},
    type = {Diploma Thesis},
    abstract = {Die vorliegende Arbeit besch\"aftigt sich mit der Rekonstruktion der r\"aumlichen Trajektorienparameter bewegter Objekte anhand von kalibrierten Stereobildsequenzen. Zur L\"osung dieses Problems wird ein Verfahren auf der Grundlage eines robusten Ausgleichungsmodells eingef\"uhrt. Als Eingabedaten dienen vorsegmentierte Bildpunkte des Objektes mit bekannter stereoskopischer und temporaler Zuordnung. Auf Basis dieser Bildinformation wird zus\"atzlich zu den Trajektorienparametern eine dreidimensionale Punktwolke in einem lokalen Objektsystem gesch\"atzt, welche Hinweise auf Form und Ausma{\ss}e des beobachteten Objektes liefert. Dar\"uber hinaus werden Techniken zur Steigerung der Effizienz und Robustheit des Verfahrens vorgestellt und es wird erl\"autert, wie m\"ogliches Vorwissen in den Ausgleichungsprozess eingebracht werden kann. Der Anwendungsfokus in Beispielen und Ergebnissen liegt auf der Bestimmung der Trajektorien von Fremdfahrzeugen mittels Eigenfahrzeugsensorik zum Zwecke der Kollisionsvermeidung. Diese Informationen sind f\"ur Fahrassistenzsysteme von gro{\ss}er Bedeutung und f\"ur die Daimler AG als Kooperationspartner dieser Arbeit von besonderem Interesse. Das Verfahren selbst wird jedoch auf kein spezielles Anwendungsgebiet beschr\"ankt. Anhand von Experimenten auf simulierten Szenen wird ein systematischer Fehler in den gesch\"atzten Objektpositionen beobachtet. Das Auftreten dieses Fehlers wird motiviert und Methoden zur Behebung werden vorgestellt. Weiterhin zeigen Experimente auf realen Aufnahmen die Notwendigkeit einer zeitlichen Gl\"attung der gesch\"atzten Trajektorienparameter. Aus diesem Grund wird eine adaptive Gl\"attungsmethode eingef\"uhrt, deren Strenge dar\"uber hinaus anwendungsbezogen gesteuert werden kann. Die Ergebnisse zeigen, dass das Verfahren, trotz hoher Ausrei{\ss}eranteile in den Eingabedaten, im Stande ist, die Bewegungstrajektorie eines Objektes mit hoher Genauigkeit und Robustheit zu bestimmen und gleichzeitig die dreidimensionale Form des beobachteten Objektes zu rekonstruieren.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Siegemund2008Trajektorienrekonstruktion.pdf},
    }

  • C. Stachniss, M. Bennewitz, G. Grisetti, S. Behnke, and W. Burgard, “How to Learn Accurate Grid Maps with a Humanoid,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Pasadena, CA, USA, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{stachniss2008,
    title = {How to Learn Accurate Grid Maps with a Humanoid},
    author = {Stachniss, C. and Bennewitz, M. and Grisetti, G. and Behnke, S. and Burgard, W.},
    booktitle = icra,
    year = {2008},
    address = {Pasadena, CA, USA},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss08icra.pdf},
    }

  • C. Stachniss, C. Plagemann, A. Lilienthal, and W. Burgard, “Gas Distribution Modeling using Sparse Gaussian Process Mixture Models,” in Proc. of Robotics: Science and Systems (RSS), Zurich, Switzerland, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{stachniss2008a,
    title = {Gas Distribution Modeling using Sparse Gaussian Process Mixture Models},
    author = {Stachniss, C. and Plagemann, C. and Lilienthal, A. and Burgard, W.},
    booktitle = rss,
    year = {2008},
    address = {Zurich, Switzerland},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss08rss.pdf},
    }

  • B. Steder, G. Grisetti, C. Stachniss, and W. Burgard, “Learning Visual Maps using Cameras and Inertial Sensors,” in Workshop on Robotic Perception, International Conf. on Computer Vision Theory and Applications, Funchal, Madeira, Portugal, 2008.
    [BibTeX]
    [none]
    @InProceedings{steder2008,
    title = {Learning Visual Maps using Cameras and Inertial Sensors},
    author = {Steder, B. and Grisetti, G. and Stachniss, C. and Burgard, W.},
    booktitle = {Workshop on Robotic Perception, International Conf. on Computer Vision Theory and Applications},
    year = {2008},
    address = {Funchal, Madeira, Portugal},
    timestamp = {2014.04.24},
    }

  • R. Steffen, “A Robust Iterative Kalman Filter Based On Implicit Measurement Equations,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2008-08, 2008.
    [BibTeX] [PDF]

    In the field of robotics and computer vision recursive estimation of time dependent processes is one of the key tasks. Usually Kalman filter based techniques are used, which rely on explicit model functions, that directly and explicitly describe the effect of the parameters on the observations. However, some problems naturally result in implicit constraints between the observations and the parameters, for instance all those resulting in homogeneous equation systems. By implicit we mean, that the constraints are given by equations, that are not easily solvable for the observation vector. We derive an iterative extended Kalman filter framework based on implicit measurement equations. In a wide field of applications the possibility to use implicit constraints simplifies the process of specifying suitable measurement equations. As an extension we introduce a robustification technique similar to [Ting et.al 2007] and [Huber 1981], which allows the presented estimation scheme to cope with outliers. Furthermore we will present results for the application of the proposed framework to the structure-from-motion task in the case of an image sequence acquired by an airborne vehicle.

    @techreport{steffen2008robust,
    author      = {Steffen, Richard},
    title       = {A Robust Iterative Kalman Filter Based On Implicit Measurement Equations},
    institution = {Department of Photogrammetry, University of Bonn},
    number      = {TR-IGG-P-2008-08},
    month       = aug,
    year        = {2008},
    abstract    = {In the field of robotics and computer vision recursive estimation of time dependent processes is one of the key tasks. Usually Kalman filter based techniques are used, which rely on explicit model functions, that directly and explicitly describe the effect of the parameters on the observations. However, some problems naturally result in implicit constraints between the observations and the parameters, for instance all those resulting in homogeneous equation systems. By implicit we mean, that the constraints are given by equations, that are not easily solvable for the observation vector. We derive an iterative extended Kalman filter framework based on implicit measurement equations. In a wide field of applications the possibility to use implicit constraints simplifies the process of specifying suitable measurement equations. As an extension we introduce a robustification technique similar to [Ting et.al 2007] and [Huber 1981], which allows the presented estimation scheme to cope with outliers. Furthermore we will present results for the application of the proposed framework to the structure-from-motion task in the case of an image sequence acquired by an airborne vehicle.},
    url         = {https://www.ipb.uni-bonn.de/pdfs/Steffen2009Robust.pdf},
    }

  • R. Steffen and W. Förstner, “On Visual Real Time Mapping for Unmanned Aerial Vehicles,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 57-62 Part B3a.
    [BibTeX] [PDF]

    This paper addresses the challenge of a real-time capable vision system in the task of trajectory and surface reconstruction by aerial image sequences. The goal is to present the design, methods and strategies of a real-time capable vision system solving the mapping task for secure navigation of small UAVs with a single camera. This includes the estimation process, map representation, initialization processes, loop closing detection and exploration strategies. The estimation process is based on the Kalman-Filter and a landmark based map representation. We introduce a new initialization method for new observed landmarks. We will show that the initialization process and the exploration strategy has a significant effect on the accuracy of the estimated camera trajectory and of the map.

    @InProceedings{steffen2008visual,
    title = {On Visual Real Time Mapping for Unmanned Aerial Vehicles},
    author = {Steffen, Richard and F\"orstner, Wolfgang},
    booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    year = {2008},
    address = {Beijing, China},
    pages = {57--62 Part B3a},
    abstract = {This paper addresses the challenge of a real-time capable vision system in the task of trajectory and surface reconstruction by aerial image sequences. The goal is to present the design, methods and strategies of a real-time capable vision system solving the mapping task for secure navigation of small UAVs with a single camera. This includes the estimation process, map representation, initialization processes, loop closing detection and exploration strategies. The estimation process is based on the Kalman-Filter and a landmark based map representation. We introduce a new initialization method for new observed landmarks. We will show that the initialization process and the exploration strategy has a significant effect on the accuracy of the estimated camera trajectory and of the map.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Steffen2008Visual.pdf},
    }

  • S. Steneberg, “Robuste Relative Orientierung kalibrierter Kameras mit Bildkanten,” Diploma Thesis, 2008.
    [BibTeX]
    [none]
    @MastersThesis{steneberg2008robuste,
    title = {Robuste Relative Orientierung kalibrierter Kameras mit Bildkanten},
    author = {Steneberg, Stephan},
    school = {University of Bonn, University of Koblenz In Zusammenarbeit mit der Arbeitsgruppe Aktives Sehen der Universit\"at Koblenz},
    year = {2008},
    note = {Betreuung: Dipl.-Inform. Timo Dickscheid, Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Diploma Thesis},
    }

  • T. Udelhoven, B. Waske, S. van der Linden, and S. Heitz, “Land-Cover Classification of Hypertemporal Data using Ensemble Systems,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2008. doi:10.1109/IGARSS.2008.4779524
    [BibTeX]

    This study addresses the problem of multiannual supervised land-cover classification using hypertemporal data from the “Mediterranean Extended Daily One Km AVHRR Data Set” (MEDOKADS) and a decision fusion approach. 10 day NDVI maximum value composite data from the Iberian Peninsula for every year in the observation period (1989 to 2004) were preprocessed using Minimum Noise Fraction (MNF-) transformation. The MNF-scores from each year were then individually pre-classified using support-vector machines (SVM). The continuous outputs from the SVM, which can be interpreted in terms of posterior probabilities, where used to train a second-order SVM classifier to merge the information within consecutive years. The decision fusion strategy significantly increased the classification accuracy compared to pre-classification results. Increasing the temporal range in decision fusion from a two year to five-year period enhanced the total accuracy. The outcomes from the selected approach were compared with another ensemble method (majority voting) and with a single SVM expert that was trained for comparable multiannual periods. The results suggest that decision fusion is superior to the other methods.

    @InProceedings{udelhoven2008land,
    title = {Land-Cover Classification of Hypertemporal Data using Ensemble Systems},
    author = {Udelhoven, T. and Waske, Bj\"orn and van der Linden, Sebastian and Heitz, S.},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2008},
    abstract = {This study addresses the problem of multiannual supervised land-cover classification using hypertemporal data from the ``Mediterranean Extended Daily One Km AVHRR Data Set'' (MEDOKADS) and a decision fusion approach. 10 day NDVI maximum value composite data from the Iberian Peninsula for every year in the observation period (1989 to 2004) were preprocessed using Minimum Noise Fraction (MNF-) transformation. The MNF-scores from each year were then individually pre-classified using support-vector machines (SVM). The continuous outputs from the SVM, which can be interpreted in terms of posterior probabilities, where used to train a second-order SVM classifier to merge the information within consecutive years. The decision fusion strategy significantly increased the classification accuracy compared to pre-classification results. Increasing the temporal range in decision fusion from a two year to five-year period enhanced the total accuracy. The outcomes from the selected approach were compared with another ensemble method (majority voting) and with a single SVM expert that was trained for comparable multiannual periods. The results suggest that decision fusion is superior to the other methods.},
    doi = {10.1109/IGARSS.2008.4779524},
    keywords = {AD 1989 to 2004;Iberian Peninsula;MEDOKADS;MNF-transformation;Mediterranean Extended Daily One Km AVHRR Data Set;Minimum Noise Fraction transformation;NDVI;decision fusion approach;ensemble systems;hypertemporal data;second-order SVM classifier;supervised land-cover classification;support-vector machines;geophysics computing;image classification;sensor fusion;support vector machines;vegetation;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske and J. A. Benediktsson, “Semi-Supervised Classifier Ensembles for Classifying Remote Sensing Data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2008. doi:10.1109/IGARSS.2008.4778938
    [BibTeX]

    The analysis of data sets, which were acquired within different time periods over the same geographical region is interesting for updating land cover maps and operational monitoring systems. In this context an adequate and temporally stable classification approach is worthwhile. In the presented study a classifier ensemble (i.e., random forests) is trained on a multispectral image from an agricultural region from and is successively modified and adapted, to classify a data set from another year. A detailed accuracy assessment clearly demonstrates that the proposed modification of the classifier significantly improves the overall accuracy, whereas a simple transfer of a classifier to a data set from another year is limited and results in a decreased accuracy. Thus the proposed approach can be recommended for classifying multiannual data sets and updating land cover maps.

    @inproceedings{waske2008semi,
    author    = {Waske, Bj\"orn and Benediktsson, Jon Atli},
    title     = {Semi-Supervised Classifier Ensembles for Classifying Remote Sensing Data},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year      = {2008},
    doi       = {10.1109/IGARSS.2008.4778938},
    abstract  = {The analysis of data sets, which were acquired within different time periods over the same geographical region is interesting for updating land cover maps and operational monitoring systems. In this context an adequate and temporally stable classification approach is worthwhile. In the presented study a classifier ensemble (i.e., random forests) is trained on a multispectral image from an agricultural region from and is successively modified and adapted, to classify a data set from another year. A detailed accuracy assessment clearly demonstrates that the proposed modification of the classifier significantly improves the overall accuracy, whereas a simple transfer of a classifier to a data set from another year is limited and results in a decreased accuracy. Thus the proposed approach can be recommended for classifying multiannual data sets and updating land cover maps.},
    keywords  = {agricultural region;data analysis;land cover maps;multispectral image;operational monitoring systems;temporally stable classification approach;agriculture;data analysis;image classification;terrain mapping;vegetation mapping;},
    owner     = {waske},
    timestamp = {2012.09.05},
    }

  • B. Waske and S. van der Linden, “Classifying multilevel imagery from SAR and optical sensors by decision fusion,” IEEE Transactions on Geoscience and Remote Sensing, vol. 46, iss. 5, p. 1457–1466, 2008. doi:10.1109/TGRS.2008.916089
    [BibTeX]

    A strategy for the joint classification of multiple segmentation levels from multisensor imagery is introduced by using synthetic aperture radar and optical data. At first, the two data sets are separately segmented, creating independent aggregation levels at different scales. Each individual level from the two sensors is then preclassified by a support vector machine (SVM). The original outputs of each SVM, i.e., images showing the distances of the pixels to the hyperplane fitted by the SVM, are used in a decision fusion to determine the final classes. The fusion strategy is based on the application of an additional classifier, which is applied on the preclassification results. Both a second SVM and random forests (RF) were tested for the decision fusion. The results are compared with SVM and RF applied to the full data set without preclassification. Both the integration of multilevel information and the use of multisensor imagery increase the overall accuracy. It is shown that the classification of multilevel-multisource data sets with SVM and RF is feasible and does not require a definition of ideal aggregation levels. The proposed decision fusion approach that applies RF to the preclassification outperforms all other approaches.

    @Article{waske2008classifying,
    title = {Classifying multilevel imagery from SAR and optical sensors by decision fusion},
    author = {Waske, Bj\"orn and van der Linden, Sebastian},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2008},
    month = may,
    number = {5},
    pages = {1457--1466},
    volume = {46},
    issn = {0196-2892},
    abstract = {A strategy for the joint classification of multiple segmentation levels from multisensor imagery is introduced by using synthetic aperture radar and optical data. At first, the two data sets are separately segmented, creating independent aggregation levels at different scales. Each individual level from the two sensors is then preclassified by a support vector machine (SVM). The original outputs of each SVM, i.e., images showing the distances of the pixels to the hyperplane fitted by the SVM, are used in a decision fusion to determine the final classes. The fusion strategy is based on the application of an additional classifier, which is applied on the preclassification results. Both a second SVM and random forests (RF) were tested for the decision fusion. The results are compared with SVM and RF applied to the full data set without preclassification. Both the integration of multilevel information and the use of multisensor imagery increase the overall accuracy. It is shown that the classification of multilevel-multisource data sets with SVM and RF is feasible and does not require a definition of ideal aggregation levels. The proposed decision fusion approach that applies RF to the preclassification outperforms all other approaches.},
    doi = {10.1109/TGRS.2008.916089},
    owner = {waske},
    timestamp = {2012.09.04},
    ut = {WOS:000255222800017},
    }

  • S. Wenzel, M. Drauschke, and W. Förstner, “Detection of repeated structures in facade images,” Pattern Recognition and Image Analysis, vol. 18, iss. 3, p. 406–411, 2008. doi:10.1134/S1054661808030073
    [BibTeX] [PDF]

    We present a method for detecting repeated structures, which is applied on facade images for describing the regularity of their windows. Our approach finds and explicitly represents repetitive structures and thus gives initial representation of facades. No explicit notion of a window is used; thus, the method also appears to be able to identify other manmade structures, e.g., paths with regular tiles. A method for detection of dominant symmetries is adapted for detection of multiply repeated structures. A compact description of the repetitions is derived from the detected translations in the image by a heuristic search method and the criterion of the minimum description length.

    @article{wenzel2008detection,
    author   = {Wenzel, Susanne and Drauschke, Martin and F\"orstner, Wolfgang},
    title    = {Detection of repeated structures in facade images},
    journal  = {Pattern Recognition and Image Analysis},
    volume   = {18},
    number   = {3},
    pages    = {406--411},
    month    = sep,
    year     = {2008},
    doi      = {10.1134/S1054661808030073},
    abstract = {We present a method for detecting repeated structures, which is applied on facade images for describing the regularity of their windows. Our approach finds and explicitly represents repetitive structures and thus gives initial representation of facades. No explicit notion of a window is used; thus, the method also appears to be able to identify other manmade structures, e.g., paths with regular tiles. A method for detection of dominant symmetries is adapted for detection of multiply repeated structures. A compact description of the repetitions is derived from the detected translations in the image by a heuristic search method and the criterion of the minimum description length.},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2008Detection.pdf},
    }

  • S. Wenzel and W. Förstner, “Semi-supervised incremental learning of hierarchical appearance models,” in 21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS), Beijing, China, 2008, p. 399–404 Part B3b-2.
    [BibTeX] [PDF]

    We propose an incremental learning scheme for learning a class hierarchy for objects typically occurring multiple in images. Given one example of an object that appears several times in the image, e.g. is part of a repetitive structure, we propose a method for identifying prototypes using an unsupervised clustering procedure. These prototypes are used for building a hierarchical appearance based model of the envisaged class in a supervised manner. For classification of new instances detected in new images we use linear subspace methods that combine discriminative and reconstructive properties. The used methods are chosen to be capable for an incremental update. We test our approach on facade images with repetitive windows and balconies. We use the learned object models to find new instances in other images, e. g. the neighbouring facade and update already learned models with the new instances.

    @inproceedings{wenzel2008semi,
    author    = {Wenzel, Susanne and F\"orstner, Wolfgang},
    title     = {Semi-supervised incremental learning of hierarchical appearance models},
    booktitle = {21st Congress of the International Society for Photogrammetry and Remote Sensing (ISPRS)},
    address   = {Beijing, China},
    pages     = {399--404 Part B3b-2},
    year      = {2008},
    abstract  = {We propose an incremental learning scheme for learning a class hierarchy for objects typically occurring multiple in images. Given one example of an object that appears several times in the image, e.g. is part of a repetitive structure, we propose a method for identifying prototypes using an unsupervised clustering procedure. These prototypes are used for building a hierarchical appearance based model of the envisaged class in a supervised manner. For classification of new instances detected in new images we use linear subspace methods that combine discriminative and reconstructive properties. The used methods are chosen to be capable for an incremental update. We test our approach on facade images with repetitive windows and balconies. We use the learned object models to find new instances in other images, e. g. the neighbouring facade and update already learned models with the new instances.},
    url       = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2008Semi.pdf},
    }

  • K. M. Wurm, C. Stachniss, and W. Burgard, “Coordinated Multi-Robot Exploration using a Segmentation of the Environment,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Nice, France, 2008.
    [BibTeX] [PDF]
    [none]
    @InProceedings{wurm2008,
    title = {Coordinated Multi-Robot Exploration using a Segmentation of the Environment},
    author = {Wurm, K. M. and Stachniss, C. and Burgard, W.},
    booktitle = iros,
    year = {2008},
    address = {Nice, France},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm08iros.pdf},
    }

  • Robotics: Science and Systems III, W. Burgard, O. Brock, and C. Stachniss, Eds., MIT Press, 2008.
    [BibTeX]
    [none]
    @Book{burgard2008,
    title = {Robotics: Science and Systems III},
    editor = {Burgard, W. and Brock, O. and Stachniss, C.},
    publisher = {MIT Press},
    year = {2008},
    month = mar,
    isbn = {0262524848},
    timestamp = {2014.04.24},
    }

2007

  • W. Burgard, C. Stachniss, and D. Haehnel, “Mobile Robot Map Learning from Range Data in Dynamic Environments,” in Autonomous Navigation in Dynamic Environments, C. Laugier and R. Chatila, Eds., springer, 2007, vol. 35.
    [BibTeX]
    [none]
    @InCollection{burgard2007,
    title = {Mobile Robot Map Learning from Range Data in Dynamic Environments},
    author = {Burgard, W. and Stachniss, C. and Haehnel, D.},
    booktitle = {Autonomous Navigation in Dynamic Environments},
    publisher = springer,
    year = {2007},
    editor = {Laugier, C. and Chatila, R.},
    series = springerstaradvanced,
    volume = {35},
    timestamp = {2014.04.24},
    }

  • F. De Sanctis, “Untersuchungen zur automatisierten Generierung von digitalen Oberflächenmodellen aus mehreren extrem großmaßstäbigen Luftbildern,” Master Thesis, 2007.
    [BibTeX]

    Die vorliegende Arbeit untersucht zwei automatische Verfahren zur dichten Oberflächenrekonstruktion mit großmaßstäbigen Bildern. Dabei wird von einer bekannten inneren sowie äußere Orientierung ausgegangen. Die Verfahren liegen mit den Programmen MATCH-T der Firma Inpho GmbH sowie eine Implementation des Semi-Global-Matching Blockmatch vor. Insbesondere soll auf die zu erreichende Höhengenauigkeit aus Bildanordnungen für den Standard-Luftbildfall eingegangen werden.

    @mastersthesis{desanctis2007untersuchungen,
    author   = {De Sanctis, Federica},
    title    = {Untersuchungen zur automatisierten Generierung von digitalen Oberfl\"achenmodellen aus mehreren extrem gro{\ss}ma{\ss}st\"abigen Luftbildern},
    school   = {Institute of Photogrammetry, University of Bonn},
    year     = {2007},
    note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
    abstract = {Die vorliegende Arbeit untersucht zwei automatische Verfahren zur dichten Oberfl\"achenrekonstruktion mit gro{\ss}ma{\ss}st\"abigen Bildern. Dabei wird von einer bekannten inneren sowie \"au{\ss}ere Orientierung ausgegangen. Die Verfahren liegen mit den Programmen MATCH-T der Firma Inpho GmbH sowie eine Implementation des Semi-Global-Matching Blockmatch vor. Insbesondere soll auf die zu erreichende H\"ohengenauigkeit aus Bildanordnungen f\"ur den Standard-Luftbildfall eingegangen werden.},
    city     = {Bonn},
    }

  • M. Drauschke, A. Brunn, K. Kulschewski, and W. Förstner, “Automatic Dodging of Aerial Images,” in Publikationen der DGPF: Von der Medizintechnik bis zur Planetenforschung – Photogrammetrie und Fernerkundung für das 21. Jahrhundert, Muttenz, Basel, 2007, p. 173–180.
    [BibTeX] [PDF]

    We present an automated approach for the dodging of images, with which we edit digital images as it is usually done with analogue images in dark-rooms. Millions of aerial images of all battle fields were taken during the Second World War. They were intensively used, e.g. for the observation of military movements, the documentation of success and failure of military operations and further planning. Today, the information of these images supports the removal of explosives of the Second World War and the identi-fication of dangerous waste in the soil. In North Rhine-Westphalia, approximately 300.000 aerial images are scanned to handle the huge amount of available data efficiently. The scanning is done with a gray value depth of 12 bits and a pixel size of 21 {\mu}m to gain both, a high radiometric and a high geometric resolution of the images. Due to the photographic process used in the 1930s and 1940s and several reproductions, the digitized images are exposed locally very differently. Therefore, the images shall be improved by automated dodging. Global approaches mostly returned unsatisfying results. Therefore, we present a new approach, which is based on local histogram equalization. Other methods as spreading the histogram or linear transformations of the histogram manipulate the images either too much or not enough. For the implementation of our approach, we focus not only on the quality of the resulting images, but also on robustness and performance of the algorithm. Thus, the technique can also be used for other applications concerning image improvements.

    @InProceedings{drauschke2007automatic,
    title = {Automatic Dodging of Aerial Images},
    author = {Drauschke, Martin and Brunn, Ansgar and Kulschewski, Kai and F\"orstner, Wolfgang},
    booktitle = {Publikationen der DGPF: Von der Medizintechnik bis zur Planetenforschung - Photogrammetrie und Fernerkundung f\"ur das 21. Jahrhundert},
    year = {2007},
    address = {Muttenz, Basel},
    editor = {Seyfert, Eckhardt},
    month = jun,
    pages = {173--180},
    publisher = {DGPF},
    volume = {16},
    abstract = {We present an automated approach for the dodging of images, with which we edit digital images as it is usually done with analogue images in dark-rooms. Millions of aerial images of all battle fields were taken during the Second World War. They were intensively used, e.g. for the observation of military movements, the documentation of success and failure of military operations and further planning. Today, the information of these images supports the removal of explosives of the Second World War and the identification of dangerous waste in the soil. In North Rhine-Westphalia, approximately 300.000 aerial images are scanned to handle the huge amount of available data efficiently. The scanning is done with a gray value depth of 12 bits and a pixel size of 21 {\mu}m to gain both, a high radiometric and a high geometric resolution of the images. Due to the photographic process used in the 1930s and 1940s and several reproductions, the digitized images are exposed locally very differently. Therefore, the images shall be improved by automated dodging. Global approaches mostly returned unsatisfying results. Therefore, we present a new approach, which is based on local histogram equalization. Other methods as spreading the histogram or linear transformations of the histogram manipulate the images either too much or not enough. For the implementation of our approach, we focus not only on the quality of the resulting images, but also on robustness and performance of the algorithm. Thus, the technique can also be used for other applications concerning image improvements.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2007Automatic.pdf},
    }

  • W. Förstner and R. Steffen, “Online geocoding and evaluation of large scale imagery without GPS,” Photogrammetric Week, Heidelberg: Wichmann Verlag, 2007.
    [BibTeX] [PDF]

    Large scale imagery will be increasingly available due to the low cost of video cameras and unmanned aerial vehicles. Their use is broad: the documentation of traffic accidents, the effects of thunderstorms onto agricultural farms, the 3D structure of industrial plants or the monitoring of archeological excavation. The value of imagery depends on the availability of (1) information about the place and date during data capture, (2) of information about the 3D-structure of the object and (3) of information about the class or identity of the objects in the scene. Geocoding, problem (1), usually relies on the availability of GPS-information, which however limits the use of imagery to outdoor applications. The paper discusses methods for geocoding and geometrical evaluation of such imagery and especially addresses the question in how far the methods can do without GPS.

    @Article{forstner2007online,
    title = {Online geocoding and evaluation of large scale imagery without {GPS}},
    author = {F\"orstner, Wolfgang and Steffen, Richard},
    journal = {Photogrammetric Week},
    year = {2007},
    address = {Heidelberg},
    publisher = {Wichmann Verlag},
    editor = {Fritsch, D.},
    abstract = {Large scale imagery will be increasingly available due to the low cost of video cameras and unmanned aerial vehicles. Their use is broad: the documentation of traffic accidents, the effects of thunderstorms onto agricultural farms, the 3D structure of industrial plants or the monitoring of archeological excavation. The value of imagery depends on the availability of (1) information about the place and date during data capture, (2) of information about the 3D-structure of the object and (3) of information about the class or identity of the objects in the scene. Geocoding, problem (1), usually relies on the availability of GPS-information, which however limits the use of imagery to outdoor applications. The paper discusses methods for geocoding and geometrical evaluation of such imagery and especially addresses the question in how far the methods can do without GPS.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2007Online.pdf},
    }

  • N. Fischer, “3D-Reconstruction from Multiple Images on the GPU,” Diplomarbeit Master Thesis, 2007.
    [BibTeX]

    Die automatische Rekonstruktion der sichtbaren Oberfläche eines Objekts aus mehreren Bildern stellt ein in seiner Allgemeinheit ungelöstes Problem. Unter günstigen Bedingungen sind jedoch erfolgreiche Ansätze vorhanden. Die schnelle Implementation solcher Ansätze auf Graphischen Prozessoren (GPU’s) stellt wegen der Entwicklung leistungsfähiger Schnittstellen und Programmiersprachen einen interessanten Ansatz dar. Dazu sind jedoch die Algorithmen auf ihre Parallelisierbarkeit zu untersuchen und zwar speziell in Bezug auf die von GPU’s bereitgestellten Strukturen. In der Arbeit soll ein Verfahren zur Oberflächenrekonstruktion in Hinblick auf seine Eignung für die Implementation auf einer GPU konzeptionell untersucht, prototypisch realisiert und untersucht werden.

    @MastersThesis{fischer20073d,
    title = {3D-Reconstruction from Multiple Images on the {GPU}},
    author = {Fischer, Norbert},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2007},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, PD Dr. Volker Steinhage; in Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    type = {Diplomarbeit},
    abstract = {Die automatische Rekonstruktion der sichtbaren Oberfl\"ache eines Objekts aus mehreren Bildern stellt ein in seiner Allgemeinheit ungel\"ostes Problem. Unter g\"unstigen Bedingungen sind jedoch erfolgreiche Ans\"atze vorhanden. Die schnelle Implementation solcher Ans\"atze auf Graphischen Prozessoren (GPU's) stellt wegen der Entwicklung leistungsf\"ahiger Schnittstellen und Programmiersprachen einen interessanten Ansatz dar. Dazu sind jedoch die Algorithmen auf ihre Parallelisierbarkeit zu untersuchen und zwar speziell in Bezug auf die von GPU's bereitgestellten Strukturen. In der Arbeit soll ein Verfahren zur Oberfl\"achenrekonstruktion in Hinblick auf seine Eignung f\"ur die Implementation auf einer GPU konzeptionell untersucht, prototypisch realisiert und untersucht werden.},
    city = {Bonn},
    }

  • C. Garvert, “Untersuchungen des SURF-Deskriptors zur Bildfolgenanalyse,” Diplomarbeit Master Thesis, 2007.
    [BibTeX]

    Im Forschungsbereich der gleichzeitigen Lokalisierung und Kartierung aus monokularen Bildfolgen ist das Verfolgen von Bildpunkten ein wesentlicher Bestandteil. Dies erfordert, dass die Bildpunkte in jedem Folgebild identifizierbar sind und nur geringe Disparitäten vorliegen. Abschattungen oder schnelle Rotationen der Kamera können den Verlust der Bildpunktverfolgung bedeuten. In den letzten Jahren wurden verschiedene Deskriptoren zur Beschreibung der Punktumgebung entwickelt, mit denen es möglich ist, eine Zuordnung von Punkten auch bei extrem großen Disparitäten zu ermöglichen. Insbesondere rotations- und skaleninvariante Deskriptoren haben in den letzten Jahren massiv an Bedeutung gewonnen. In der Diplomarbeit soll der von Bay et al. (2006) vorgestellte rotations- und skaleninvariante Punktdeskriptor SURF implementiert werden. Im Gegensatz zum Sift Deskriptor von Lowe (2004) werden beim SURF-Deskriptor Integral-Bilder zur wesentlich schnelleren Berechnung verwendet. In der Arbeit soll untersucht werden, welche Parameter des Punktdeskriptors Genauigkeit und Geschwindigkeit beeinflussen. Da der Deskriptor auf einer anderen Art von Punktmerkmalen basiert, soll überprüft werden, bei welchen Typen von Bilddaten der SURF Deskriptor zum Sift-Deskriptor über- bzw. unterlegen ist. Der SURF Deskriptor soll an künstlichen Daten und wenn möglich an realen Daten (Luftbilder) getestet und evaluiert werden.

    @mastersthesis{garvert2007untersuchungen,
      author   = {Garvert, Christina},
      title    = {Untersuchungen des SURF-Deskriptors zur Bildfolgenanalyse},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {2007},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
      abstract = {Im Forschungsbereich der gleichzeitigen Lokalisierung und Kartierung aus monokularen Bildfolgen ist das Verfolgen von Bildpunkten ein wesentlicher Bestandteil. Dies erfordert, dass die Bildpunkte in jedem Folgebild identifizierbar sind und nur geringe Disparit\"aten vorliegen. Abschattungen oder schnelle Rotationen der Kamera k\"onnen den Verlust der Bildpunktverfolgung bedeuten. In den letzten Jahren wurden verschiedene Deskriptoren zur Beschreibung der Punktumgebung entwickelt, mit denen es m\"oglich ist, eine Zuordnung von Punkten auch bei extrem gro{\ss}en Disparit\"aten zu erm\"oglichen. Insbesondere rotations- und skaleninvariante Deskriptoren haben in den letzten Jahren massiv an Bedeutung gewonnen. In der Diplomarbeit soll der von Bay et al. (2006) vorgestellte rotations- und skaleninvariante Punktdeskriptor SURF implementiert werden. Im Gegensatz zum Sift Deskriptor von Lowe (2004) werden beim SURF-Deskriptor Integral-Bilder zur wesentlich schnelleren Berechnung verwendet. In der Arbeit soll untersucht werden, welche Parameter des Punktdeskriptors Genauigkeit und Geschwindigkeit beeinflussen. Da der Deskriptor auf einer anderen Art von Punktmerkmalen basiert, soll \"uberpr\"uft werden, bei welchen Typen von Bilddaten der SURF Deskriptor zum Sift-Deskriptor \"uber- bzw. unterlegen ist. Der SURF Deskriptor soll an k\"unstlichen Daten und wenn m\"oglich an realen Daten (Luftbilder) getestet und evaluiert werden.},
      city     = {Bonn},
    }

  • S. Grau, “Untersuchungen zur Rekonstruktion von Bohrungen aus Stereobildern,” Diplomarbeit Master Thesis, 2007.
    [BibTeX]

    In der Industrie werden stereoskopische Messtechniken zur Prüfung von Werkstücken bereits erfolgreich eingesetzt. Dabei werden höchste Anforderungen an die Genauigkeit gestellt. Insbesondere metallene Oberflächen stellen durch ihr schwer vorhersehbares Reflektionsverhalten eine besondere Herausforderung dar. Eine Oberflächenrekonstruktion wird heute im Allgemeinen punktweise durch Einsatz von strukturiertem Licht gelöst. Damit werden zwar hoch genau Oberflächen vermessen, jedoch bleibt die Bestimmung von präzisen Koordinaten von Bohrlöchern ein bisher ungelöstes Problem. Diese Diplomarbeit setzt sich zum Ziel, die Position eines mit einem Stereosystem beobachteten Bohrlochs (Kreis im Raum) im photogrammetrischen System präzise zu bestimmen. Grundlage der Rekonstruktion sind subpixelgenaue Kanten des Bohrlochs. Dabei treten auch Kanten aus Spiegelungen auf. In einem ersten Schritt soll untersucht werden, wie Näherungswerte bestimmt werden können. In einem zweiten Schritt ist ein robustes Ausgleichsmodell zu realisieren. Es soll untersucht werden, unter welchen Bedingungen welche Genauigkeiten der Rekonstruktion der Bohrloch-Koordinate erreicht werden können.

    @MastersThesis{grau2007untersuchungen,
    title = {Untersuchungen zur Rekonstruktion von Bohrungen aus Stereobildern},
    author = {Grau, Stephan},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2007},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
    type = {Diplomarbeit},
    abstract = {In der Industrie werden stereoskopische Messtechniken zur Pr\"ufung von Werkst\"ucken bereits erfolgreich eingesetzt. Dabei werden h\"ochste Anforderungen an die Genauigkeit gestellt. Insbesondere metallene Oberfl\"achen stellen durch ihr schwer vorhersehbares Reflektionsverhalten eine besondere Herausforderung dar. Eine Oberfl\"achenrekonstruktion wird heute im Allgemeinen punktweise durch Einsatz von strukturiertem Licht gel\"ost. Damit werden zwar hoch genau Oberfl\"achen vermessen, jedoch bleibt die Bestimmung von pr\"azisen Koordinaten von Bohrl\"ochern ein bisher ungel\"ostes Problem. Diese Diplomarbeit setzt sich zum Ziel, die Position eines mit einem Stereosystem beobachteten Bohrlochs (Kreis im Raum) im photogrammetrischen System pr\"azise zu bestimmen. Grundlage der Rekonstruktion sind subpixelgenaue Kanten des Bohrlochs. Dabei treten auch Kanten aus Spiegelungen auf. In einem ersten Schritt soll untersucht werden, wie N\"aherungswerte bestimmt werden k\"onnen. In einem zweiten Schritt ist ein robustes Ausgleichsmodell zu realisieren. Es soll untersucht werden, unter welchen Bedingungen welche Genauigkeiten der Rekonstruktion der Bohrloch-Koordinate erreicht werden k\"onnen.},
    city = {Bonn},
    }

  • G. Grisetti, S. Grzonka, C. Stachniss, P. Pfaff, and W. Burgard, “Efficient Estimation of Accurate Maximum Likelihood Maps in 3D,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Diego, CA, USA, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{grisetti2007c,
      author    = {Grisetti, G. and Grzonka, S. and Stachniss, C. and Pfaff, P. and Burgard, W.},
      title     = {Efficient Estimation of Accurate Maximum Likelihood Maps in 3D},
      booktitle = iros,
      address   = {San Diego, CA, USA},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti07iros.pdf},
    }

  • G. Grisetti, C. Stachniss, and W. Burgard, “Improved Techniques for Grid Mapping with Rao-Blackwellized Particle Filters,” IEEE Transactions on Robotics (T-RO), vol. 23, iss. 1, p. 34–46, 2007.
    [BibTeX] [PDF]
    [none]
    @article{grisetti2007a,
      author    = {Grisetti, G. and Stachniss, C. and Burgard, W.},
      title     = {Improved Techniques for Grid Mapping with Rao-Blackwellized Particle Filters},
      journal   = ieeetransrob,
      volume    = {23},
      number    = {1},
      pages     = {34--46},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti07tro.pdf},
    }

  • G. Grisetti, C. Stachniss, S. Grzonka, and W. Burgard, “A Tree Parameterization for Efficiently Computing Maximum Likelihood Maps using Gradient Descent,” in Proc. of Robotics: Science and Systems (RSS), Atlanta, GA, USA, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{grisetti2007b,
      author    = {Grisetti, G. and Stachniss, C. and Grzonka, S. and Burgard, W.},
      title     = {A Tree Parameterization for Efficiently Computing Maximum Likelihood Maps using Gradient Descent},
      booktitle = rss,
      address   = {Atlanta, GA, USA},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti07rss.pdf},
    }

  • G. Grisetti, G. D. Tipaldi, C. Stachniss, W. Burgard, and D. Nardi, “Fast and Accurate SLAM with Rao-Blackwellized Particle Filters,” Journal on Robotics and Autonomous Systems (RAS), vol. 55, iss. 1, p. 30–38, 2007.
    [BibTeX] [PDF]
    [none]
    @article{grisetti2007,
      author    = {Grisetti, G. and Tipaldi, G.D. and Stachniss, C. and Burgard, W. and Nardi, D.},
      title     = {Fast and Accurate {SLAM} with Rao-Blackwellized Particle Filters},
      journal   = jras,
      volume    = {55},
      number    = {1},
      pages     = {30--38},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti07jras.pdf},
    }

  • V. Heinzel, B. Waske, M. Braun, and G. Menz, “Remote sensing data assimilation for regional crop growth modelling in the region of Bonn (Germany),” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2007. doi:10.1109/IGARSS.2007.4423636
    [BibTeX]

    The study investigates the possibilities to improve the performance of CERES-Wheat crop growth model by assimilating information derived by optical and SAR Earth observation data. Biophysical parameter retrieval was done with the water cloud model for SAR data and the CLAIR model was applied to multispectral imagery. The CERES-Wheat model was calibrated using ground truth information. The re-initialization method with an adjustable planting date was selected as assimilation strategy. Modelling results generally improved by using all different kind of remote sensing data. However, best results were achieved by using information of the optical sensors only and not by a synergetic time series of all available data.

    @InProceedings{heinzel2007remote,
    title = {Remote sensing data assimilation for regional crop growth modelling in the region of Bonn (Germany)},
    author = {Heinzel, V. and Waske, Bj\"orn and Braun, M. and Menz, Gunter},
    booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
    year = {2007},
    abstract = {The study investigates the possibilities to improve the performance of CERES-Wheat crop growth model by assimilating information derived by optical and SAR Earth observation data. Biophysical parameter retrieval was done with the water cloud model for SAR data and the CLAIR model was applied to multispectral imagery. The CERES-Wheat model was calibrated using ground truth information. The re-initialization method with an adjustable planting date was selected as assimilation strategy. Modelling results generally improved by using all different kind of remote sensing data. However, best results were achieved by using information of the optical sensors only and not by a synergetic time series of all available data.},
    doi = {10.1109/IGARSS.2007.4423636},
    keywords = {Bonn;CERES-wheat crop growth model;CLAIR model;Germany;SAR Earth observation data;biophysical parameter retrieval;data assimilation;ground truth information;information assimilation;multispectral imagery;optical data;optical sensors;regional crop growth modelling;remote sensing;water cloud model;crops;data assimilation;geophysical signal processing;radar imaging;remote sensing by radar;spectral analysis;synthetic aperture radar;vegetation mapping;},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • K. Herms, “Aufbau einer Datenbank unter Matlab zur Verwaltung von Bildsegmenten,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2007-05, 2007.
    [BibTeX] [PDF]

    Ein großer Aufgabenbereich der Bildverarbeitung ist die Merkmalsextraktion. Hierbei ist es zunächst erforderlich, die Bilder durch eine Segmentierung in konsistente Landkarten zu überführen. Wir verwenden zur Segmentierung einen Wasserscheidenalgorithmus. Die Verwaltung der Landkarten sollte möglichst effizient erfolgen. Der vorliegende Report erläutert zunächst unterschiedliche Speicherstrukturen und geht auf einen möglichen Ansatz zur konsistenten Umwandlung von Rasterdaten in Vektordaten ein. In einem zweiten Teil beschäftigen wir uns mit dem Aufbau einer Datenbank zur Verwaltung dieser Vektordaten von Matlab aus.

    @TechReport{herms2007aufbau,
    title = {Aufbau einer Datenbank unter Matlab zur Verwaltung von Bildsegmenten},
    author = {Herms, Kerstin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2007},
    month = aug,
    number = {TR-IGG-P-2007-05},
    abstract = {Ein gro{\ss}er Aufgabenbereich der Bildverarbeitung ist die Merkmalsextraktion. Hierbei ist es zun\"achst erforderlich, die Bilder durch eine Segmentierung in konsistente Landkarten zu \"uberf\"uhren. Wir verwenden zur Segmentierung einen Wasserscheidenalgorithmus. Die Verwaltung der Landkarten sollte m\"oglichst effizient erfolgen. Der vorliegende Report erl\"autert zun\"achst unterschiedliche Speicherstrukturen und geht auf einen m\"oglichen Ansatz zur konsistenten Umwandlung von Rasterdaten in Vektordaten ein. In einem zweiten Teil besch\"aftigen wir uns mit dem Aufbau einer Datenbank zur Verwaltung dieser Vektordaten von Matlab aus.},
    keywords = {Segmentation, Algorithmic Geometry, GIS},
    url = {https://www.ipb.uni-bonn.de/pdfs/Herms2007Aufbau.pdf},
    }

  • K. Herms, “Exploration des Skalenraumes bezüglich der Gebäudeextraktion in terrestrischen Farbbildern,” Diplomarbeit Master Thesis, 2007.
    [BibTeX] [PDF]

    Die Gebäudedetektion in digitalen Bildern stellt wegen der Komplexität der Objekte ein schwieriges Problem der Mustererkennung dar. In neueren Ansätzen zur Gebäudeextraktion wird das Bild in verschiedenen Auflösungsstufen, im sog. Skalenraum analysiert. Auf diese Weise können für die Bildinterpretation hinderliche Details ausgeblendet werden. Dabei spielen stabile Regionen, d. s. Regionen die sich bei Veränderung der Auflösung wenig ändern, eine besondere Rolle. Von stabilen Regionen im Skalenraum kann man auf kontraststarke Übergänge zwischen Objekten im Bild schließen [Drauschke et al. 2006: Stabilität von Regionen im Skalenraum]. Diese Diplomarbeit soll untersuchen, ob über stabilen Bildregionen eine Klassifikation von Gebäuden und anderen Objekten durchgeführt werden kann. Dazu sollen Merkmale der stabilen Regionen ausgewählt und bestimmt werden und diese Merkmale auf ihre Skalenabhängigkeit hin überprüft werden. Mit Hilfe eines geeignet gewählten Klassifikators sollen Gebäude und andere Objekte identifiziert werden. An Hand von terrestrischen Bildern soll bewertet werden, ob die u. U. skalenabhängigen Merkmale für die Gebäudeextraktion geeignet sind.

    @mastersthesis{herms2007exploration,
      author   = {Herms, Kerstin},
      title    = {Exploration des Skalenraumes bez\"uglich der Geb\"audeextraktion in terrestrischen Farbbildern},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {2007},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Martin Drauschke},
      abstract = {Die Geb\"audedetektion in digitalen Bildern stellt wegen der Komplexit\"at der Objekte ein schwieriges Problem der Mustererkennung dar. In neueren Ans\"atzen zur Geb\"audeextraktion wird das Bild in verschiedenen Aufl\"osungsstufen, im sog. Skalenraum analysiert. Auf diese Weise k\"onnen f\"ur die Bildinterpretation hinderliche Details ausgeblendet werden. Dabei spielen stabile Regionen, d. s. Regionen die sich bei Ver\"anderung der Aufl\"osung wenig \"andern, eine besondere Rolle. Von stabilen Regionen im Skalenraum kann man auf kontraststarke \"Uberg\"ange zwischen Objekten im Bild schlie{\ss}en [Drauschke et al. 2006: Stabilit\"at von Regionen im Skalenraum]. Diese Diplomarbeit soll untersuchen, ob \"uber stabilen Bildregionen eine Klassifikation von Geb\"auden und anderen Objekten durchgef\"uhrt werden kann. Dazu sollen Merkmale der stabilen Regionen ausgew\"ahlt und bestimmt werden und diese Merkmale auf ihre Skalenabh\"angigkeit hin \"uberpr\"uft werden. Mit Hilfe eines geeignet gew\"ahlten Klassifikators sollen Geb\"aude und andere Objekte identifiziert werden. An Hand von terrestrischen Bildern soll bewertet werden, ob die u. U. skalenabh\"angigen Merkmale f\"ur die Geb\"audeextraktion geeignet sind.},
      city     = {Bonn},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Herms2007Exploration.pdf},
    }

  • K. Herms, “Extraktion relevanter Bildkanten für die Gebäudedetektion (umgesetzt in Matlab),” Department of Photogrammetry, University of Bonn, TR-IGG-P-2007-05, 2007.
    [BibTeX] [PDF]

    Um Gebäude in Bildern detektieren zu können, greifen wir als ein Merkmal auf Bildkanten zurück. Kanten liegen zum einen aus der Bildsegmentierung vor, können aber auch gezielt mit einer Kantenextraktion aus dem quadratischen Gradientenbild gewonnen werden. Durch die Auswahl von Kanten, die in beiden Fällen auftreten, wollen wir eine Einschränkung auf möglichst relevante Kanten vornehmen. Diese Arbeit beschäftigt sich mit dem Auffinden (und der Gewichtung) von Kantenzügen eines segmentierten Bildes, die einer Kante im quadratischen Gradientenbild zugeordnet werden können.

    @TechReport{herms2007extraktion,
    title = {Extraktion relevanter Bildkanten f\"ur die Geb\"audedetektion (umgesetzt in Matlab)},
    author = {Herms, Kerstin},
    institution = {Department of Photogrammetry, University of Bonn},
    year = {2007},
    number = {TR-IGG-P-2007-05},
    abstract = {Um Geb\"aude in Bildern detektieren zu k\"onnen, greifen wir als ein Merkmal auf Bildkanten zur\"uck. Kanten liegen zum einen aus der Bildsegmentierung vor, k\"onnen aber auch gezielt mit einer Kantenextraktion aus dem quadratischen Gradientenbild gewonnen werden. Durch die Auswahl von Kanten, die in beiden F\"allen auftreten, wollen wir eine Einschr\"ankung auf m\"oglichst relevante Kanten vornehmen. Diese Arbeit besch\"aftigt sich mit dem Auffinden (und der Gewichtung) von Kantenz\"ugen eines segmentierten Bildes, die einer Kante im quadratischen Gradientenbild zugeordnet werden k\"onnen.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Herms2007Extraktion.pdf},
    }

  • A. Janz, S. van der Linden, B. Waske, and P. Hostert, “imageSVM – A user-oriented tool for advanced classification of hyperspectral data using support vector machines,” in 5th Workshop of the EARSeL Special Interest Group Imaging Spectroscopy, 2007.
    [BibTeX] [PDF]

    An implementation for the classification of remote sensing images with support vector machines (SVM) is introduced. This tool, called imageSVM, allows a user-friendly work, especially with large, highly-resolved data sets in the ENVI/IDL environment. imageSVM uses LIBSVM for the training of the SVM in combination with a user-defined grid search. Parameter settings can be set flexibly during the entire workflow and a time-efficient processing becomes possible. First tests underline the high-accuracy of SVM classification using heterogeneous hyperspectral data and the good performance of SVM in the context of multi-sensoral studies.

    @inproceedings{janz2007imagesvm,
      author    = {Janz, Andreas and van der Linden, Sebastian and Waske, Bj\"orn and Hostert, Patrick},
      title     = {imageSVM - A user-oriented tool for advanced classification of hyperspectral data using support vector machines},
      booktitle = {5th Workshop of the EARSeL Special Interest Group Imaging Spectroscopy},
      year      = {2007},
      abstract  = {An implementation for the classification of remote sensing images with support vector machines (SVM) is introduced. This tool, called imageSVM, allows a user-friendly work, especially with large, highly-resolved data sets in the ENVI/IDL environment. imageSVM uses LIBSVM for the training of the SVM in combination with a user-defined grid search. Parameter settings can be set flexibly during the entire workflow and a time-efficient processing becomes possible. First tests underline the high-accuracy of SVM classification using heterogeneous hyperspectral data and the good performance of SVM in the context of multi-sensoral studies.},
      owner     = {waske},
      timestamp = {2012.09.05},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Janz2007imageSVM.pdf},
    }

  • D. Joho, C. Stachniss, P. Pfaff, and W. Burgard, “Autonomous Exploration for 3D Map Learning,” in Autonome Mobile Systeme, Kaiserslautern, Germany, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{joho2007,
      author    = {Joho, D. and Stachniss, C. and Pfaff, P. and Burgard, W.},
      title     = {Autonomous Exploration for 3D Map Learning},
      booktitle = ams,
      address   = {Kaiserslautern, Germany},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/joho07ams.pdf},
    }

  • F. Klughardt, “Einführung eines neuen Photokonsistenz-Maßes zur Oberflächenrekonstruktion in Luftbildern mittels eines Multilabel-Graphcut-Verfahrens,” Diplomarbeit Master Thesis, 2007.
    [BibTeX]

    Die Zuordnung von Bildern im Rahmen einer dreidimensionalen Rekonstruktion der abgebildeten Szene stellt in seiner Allgemeinheit ein bisher – im Vergleich zum visuellen System des Menschen – nur sehr unzureichend gelöstes Problem dar. Gleichzeitig gibt es eine große Zahl erfolgreicher Ansätze zur Lösung des Problems unter wohl definierten Bedingungen und eine beträchtliche Zahl in der Praxis angewendeter Verfahren. Die Rekonstruktion von Oberflächenmodellen aus Luftbildern stellt eine in jüngster Zeit mit den Entwicklungen von Google-Earth and Virtual Earth zunehmend beachtete Problemstellung dar. Die Randbedingungen für eine Stereorekonstruktion sind hier wegen der meist günstig gewählten Lichtverhältnisse bei der Bildaufnahme und der meist vorhandenen diffusen Reflexionseigenschaften der Oberflächen vergleichsweise homogen, wenn man von Schatteneffekten und gelegentlichen spiegelnden Reflektionen absieht. Das in diesem Bereich übliche Maß zur Kennzeichnung der Ähnlichkeit zugeordneter Bildbereiche sind der normalisierte Korrelationskoeffizient und die Summe der quadratischen Intensitätsdifferenzen. Die beiden Maße stellen Extreme bzgl. der Invarianz gegen Beleuchtungsveränderung dar: Der Korrelationskoeffizient ist völlig invariant, die Summe der quadratischen Intensitätsdifferenzen nicht invariant. Das zentrale Anliegen der vorliegenden Arbeit ist die Entwicklung eines neuen Photokonsistenzmaßes, das zwischen diesen beiden Extremen zu vermitteln in der Lage ist. Für eine genäherte Oberflächenrekonstruktion wird das Multi-Level-Graphcut-Verfahren eingesetzt, das vergleichsweise effizient das komplexe Problem der Oberflächenrekonstruktion lösen kann und das neben dem neuen Photokonsistenzmaß flexibel Vorinformation über die Oberfläche integrieren und so das Problem von Unstetigkeiten angehen kann.

    @MastersThesis{klughardt2007einfuhrung,
    title = {Einf\"uhrung eines neuen Photokonsistenz-Ma{\ss}es zur Oberfl\"achenrekonstruktion in Luftbildern mittels eines Multilabel-Graphcut-Verfahrens},
    author = {Klughardt, Frank},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2007},
    note = {Betreuung: Prof. Dr. Daniel Cremers, Prof. Dr.-Ing. Wolfgang F\"orstner; in Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    type = {Diplomarbeit},
    abstract = {Die Zuordnung von Bildern im Rahmen einer dreidimensionalen Rekonstruktion der abgebildeten Szene stellt in seiner Allgemeinheit ein bisher - im Vergleich zum visuellen System des Menschen - nur sehr unzureichend gel\"ostes Problem dar. Gleichzeitig gibt es eine gro{\ss}e Zahl erfolgreicher Ans\"atze zur L\"osung des Problems unter wohl definierten Bedingungen und eine betr\"achtliche Zahl in der Praxis angewendeter Verfahren. Die Rekonstruktion von Oberfl\"achenmodellen aus Luftbildern stellt eine in j\"ungster Zeit mit den Entwicklungen von Google-Earth and Virtual Earth zunehmend beachtete Problemstellung dar. Die Randbedingungen f\"ur eine Stereorekonstruktion sind hier wegen der meist g\"unstig gew\"ahlten Lichtverh\"altnisse bei der Bildaufnahme und der meist vorhandenen diffusen Reflexionseigenschaften der Oberfl\"achen vergleichsweise homogen, wenn man von Schatteneffekten und gelegentlichen spiegelnden Reflektionen absieht. Das in diesem Bereich \"ubliche Ma{\ss} zur Kennzeichnung der \"Ahnlichkeit zugeordneter Bildbereiche sind der normalisierte Korrelationskoeffizient und die Summe der quadratischen Intensit\"atsdifferenzen. Die beiden Ma{\ss}e stellen Extreme bzgl. der Invarianz gegen Beleuchtungsver\"anderung dar: Der Korrelationskoeffizient ist v\"ollig invariant, die Summe der quadratischen Intensit\"atsdifferenzen nicht invariant. Das zentrale Anliegen der vorliegenden Arbeit ist die Entwicklung eines neuen Photokonsistenzma{\ss}es, das zwischen diesen beiden Extremen zu vermitteln in der Lage ist. F\"ur eine gen\"aherte Oberfl\"achenrekonstruktion wird das Multi-Level-Graphcut-Verfahren eingesetzt, das vergleichsweise effizient das komplexe Problem der Oberfl\"achenrekonstruktion l\"osen kann und das neben dem neuen Photokonsistenzma{\ss} flexibel Vorinformation \"uber die Oberfl\"ache integrieren und so das Problem von Unstetigkeiten angehen kann.},
    city = {Bonn},
    }

  • F. Korč and V. Hlaváč, “Human Motion – Understanding, Modeling, Capture and Animation,” 1 ed., B. Rosenhahn, R. Klette, and D. Metaxas, Eds., Springer, 2007, vol. 36, p. 105–130.
    [BibTeX] [PDF]

    This work contributes to detection and tracking of walking or running humans in surveillance video sequences. We propose a 2D model-based approach to the whole body tracking in a video sequence captured from a single camera view. An extended six-link biped human model is employed. We assume that a static camera observes the scene horizontally or obliquely. Persons can be seen from a continuum of views ranging from a lateral to a frontal one. We do not expect humans to be the only moving objects in the scene and to appear at the same scale at different image locations.

    @incollection{korvc2007human,
    title = {Detection and Tracking of Humans in Single View Sequences Using {2D} Articulated Model},
    author = {Kor{\vc}, Filip and Hlav{\'a}{\vc}, V{\'a}clav},
    booktitle = {Human Motion - Understanding, Modeling, Capture and Animation},
    editor = {Rosenhahn, Bodo and Klette, Reinhard and Metaxas, Dimitris},
    pages = {105--130},
    publisher = {Springer},
    year = {2007},
    edition = {1},
    series = {Computational Imaging and Vision},
    volume = {36},
    abstract = {This work contributes to detection and tracking of walking or running humans in surveillance video sequences. We propose a 2D model-based approach to the whole body tracking in a video sequence captured from a single camera view. An extended six-link biped human model is employed. We assume that a static camera observes the scene horizontally or obliquely. Persons can be seen from a continuum of views ranging from a lateral to a frontal one. We do not expect humans to be the only moving objects in the scene and to appear at the same scale at different image locations.},
    isbn = {978-1-4020-6692-4},
    keywords = {human detection in video, model-based human detection},
    url = {https://www.ipb.uni-bonn.de/pdfs/Korvc2007Human.pdf},
    }

  • S. van der Linden, A. Janz, B. Waske, M. Eiden, and P. Hostert, “Classifying segmented hyperspectral data from a heterogeneous urban environment using support vector machines,” Journal of Applied Remote Sensing, vol. 1, p. 13543, 2007. doi:10.1117/1.2813466
    [BibTeX]

    Classifying remotely sensed images from urban environments is challenging. Urban land cover classes are spectrally heterogeneous and materials from different classes have similar spectral properties. Image segmentation has become a common preprocessing step that helped to overcome such problems. However, little attention has been paid to impacts of segmentation on the data’s spectral information content. Here, urban hyperspectral data is spectrally classified using support vector machines (SVM). By training a SVM on pixel information and applying it to the image before segmentation and after segmentation at different levels, the classification framework is maintained and the influence of the spectral generalization during image segmentation hence directly investigated. In addition, a straightforward multi-level approach was performed, which combines information from different levels into one final map. A stratified accuracy assessment by urban structure types is applied. The classification of the unsegmented data achieves an overall accuracy of 88.7\%. Accuracy of the segment-based classification is lower and decreases with increasing segment size. Highest accuracies for the different urban structure types are achieved at varying segmentation levels. The accuracy of the multi-level approach is similar to that of unsegmented data but comprises the positive effects of more homogeneous segment-based classifications at different levels in one map.

    @article{linden2007classifying,
    title = {Classifying segmented hyperspectral data from a heterogeneous urban environment using support vector machines},
    author = {van der Linden, Sebastian and Janz, Andreas and Waske, Bj\"orn and Eiden, Michael and Hostert, Patrick},
    journal = {Journal of Applied Remote Sensing},
    year = {2007},
    pages = {013543},
    volume = {1},
    abstract = {Classifying remotely sensed images from urban environments is challenging. Urban land cover classes are spectrally heterogeneous and materials from different classes have similar spectral properties. Image segmentation has become a common preprocessing step that helped to overcome such problems. However, little attention has been paid to impacts of segmentation on the data's spectral information content. Here, urban hyperspectral data is spectrally classified using support vector machines (SVM). By training a SVM on pixel information and applying it to the image before segmentation and after segmentation at different levels, the classification framework is maintained and the influence of the spectral generalization during image segmentation hence directly investigated. In addition, a straightforward multi-level approach was performed, which combines information from different levels into one final map. A stratified accuracy assessment by urban structure types is applied. The classification of the unsegmented data achieves an overall accuracy of 88.7\%. Accuracy of the segment-based classification is lower and decreases with increasing segment size. Highest accuracies for the different urban structure types are achieved at varying segmentation levels. The accuracy of the multi-level approach is similar to that of unsegmented data but comprises the positive effects of more homogeneous segment-based classifications at different levels in one map.},
    doi = {10.1117/1.2813466},
    issn = {1931-3195},
    owner = {waske},
    tc = {11},
    timestamp = {2012.09.04},
    ut = {WOS:000260914300007},
    z8 = {0},
    z9 = {11},
    zb = {2},
    }

  • S. van der Linden, B. Waske, and P. Hostert, “Towards an optimized use of the spectral angle space,” in 5th Workshop of the EARSeL Special Interest Group Imaging Spectroscopy, 2007.
    [BibTeX] [PDF]

    The concept of spectral angle mapping (SAM) is extended in this work by the use of self-learning decision trees (DT) to evaluate rule images. We test whether the performance of the SAM can be improved to achieve the quality of more recent machine learning classifiers in spectrally heterogeneous environments. Results show that the integration of the DT significantly increases the accuracy of the SAM of urban hyperspectral data. However, the accuracy of support vector machines is not achieved. Despite this lower accuracy, the spectral angle space as constituted by the SAM rule images appears to be a useful class-specific transformation of the data, which might be used similar to common transformations in future works.

    @inproceedings{linden2007towards,
      author    = {van der Linden, Sebastian and Waske, Bj\"orn and Hostert, Patrick},
      title     = {Towards an optimized use of the spectral angle space},
      booktitle = {5th Workshop of the EARSeL Special Interest Group Imaging Spectroscopy},
      year      = {2007},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Linden2007Towards.pdf},
      abstract  = {The concept of spectral angle mapping (SAM) is extended in this work by the use of self-learning decision trees (DT) to evaluate rule images. We test whether the performance of the SAM can be improved to achieve the quality of more recent machine learning classifiers in spectrally heterogeneous environments. Results show that the integration of the DT significantly increases the accuracy of the SAM of urban hyperspectral data. However, the accuracy of support vector machines is not achieved. Despite this lower accuracy, the spectral angle space as constituted by the SAM rule images appears to be a useful class-specific transformation of the data, which might be used similar to common transformations in future works.},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • O. Martínez-Mozos, C. Stachniss, A. Rottmann, and W. Burgard, “Using AdaBoost for Place Labelling and Topological Map Building,” in Robotics Research, S. Thrun, R. Brooks, and H. Durrant-Whyte, Eds., Springer, 2007, vol. 28.
    [BibTeX] [PDF]
    [none]
    @incollection{martinez-mozos2007,
      author    = {Mart\'{i}nez-Mozos, O. and Stachniss, C. and Rottmann, A. and Burgard, W.},
      title     = {Using AdaBoost for Place Labelling and Topological Map Building},
      booktitle = {Robotics Research},
      editor    = {Thrun, S. and Brooks, R. and Durrant-Whyte, H.},
      publisher = springer,
      series    = springerstaradvanced,
      volume    = {28},
      year      = {2007},
      isbn      = {978-3-540-48110-2},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/martinez07springer.pdf},
    }

  • P. Pfaff, R. Kuemmerle, D. Joho, C. Stachniss, R. Triebel, and W. Burgard, “Navigation in Combined Outdoor and Indoor Environments using Multi-Level Surface Maps,” San Diego, CA, USA, 2007.
    [BibTeX] [PDF]
    [none]
    @InProceedings{pfaff2007a,
    title = {Navigation in Combined Outdoor and Indoor Environments using Multi-Level Surface Maps},
    author = {Pfaff, P. and Kuemmerle, R. and Joho, D. and Stachniss, C. and Triebel, R. and Burgard, W.},
    booktitle = iroswsnav,
    year = {2007},
    address = {San Diego, CA, USA},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/pfaff07irosws.pdf},
    }

  • P. Pfaff, R. Triebel, C. Stachniss, P. Lamon, W. Burgard, and R. Siegwart, “Towards Mapping of Cities,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Rome, Italy, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{pfaff2007,
      author    = {Pfaff, P. and Triebel, R. and Stachniss, C. and Lamon, P. and Burgard, W. and Siegwart, R.},
      title     = {Towards Mapping of Cities},
      booktitle = icra,
      address   = {Rome, Italy},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/pfaff07icra.pdf},
    }

  • J. Saatkamp and J. Schmittwilken, “Generative Models and Markov Chain Monte Carlo Techniques for Detection and Reconstruction of Stairs from Point Clouds,” in Proc. of the ISPRS Workshop on Updating Geo-spatial Databases with Imagery & The 5th ISPRS Workshop on Dynamic and Multi-dimensional GIS, Urumqi, China, 2007, p. 111–119.
    [BibTeX] [PDF]

    The paper describes an approach for the automatical reconstruction of homogeneous straight stairs from point cloud data by using a generative model and Markov Chain Monte Carlo techniques for estimating the parameters. Parameters for a generative model for stairs are presented. The six parameters of this 2D model are determined with a maximum-a-posteriori estimation approach. For all parameters prior probability distributions are chosen. Two types of likelihood functions are introduced. It is shown that four of the parameters under certain conditions can be determined via MCMC. Some results are presented.

    @inproceedings{saatkamp2007generative,
      author       = {Saatkamp, Jens and Schmittwilken, J\"org},
      title        = {Generative Models and Markov Chain Monte Carlo Techniques for Detection and Reconstruction of Stairs from Point Clouds},
      booktitle    = {Proc. of the ISPRS Workshop on Updating Geo-spatial Databases with Imagery \& The 5th ISPRS Workshop on Dynamic and Multi-dimensional GIS},
      editor       = {Jiang, Jie and Zhao, Renliang},
      organization = {ISPRS},
      series       = {The International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
      volume       = {XXXVI},
      number       = {part 4/W54},
      pages        = {111--119},
      address      = {Urumqi, China},
      month        = aug,
      year         = {2007},
      abstract     = {The paper describes an approach for the automatical reconstruction of homogeneous straight stairs from point cloud data by using a generative model and Markov Chain Monte Carlo techniques for estimating the parameters. Parameters for a generative model for stairs are presented. The six parameters of this 2D model are determined with a maximum-a-posteriori estimation approach. For all parameters prior probability distributions are chosen. Two types of likelihood functions are introduced. It is shown that four of the parameters under certain conditions can be determined via MCMC. Some results are presented.},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Saatkamp2007Generative.pdf},
    }

  • J. Schmittwilken, J. Saatkamp, W. Förstner, T. Kolbe, and L. Plümer, “A Semantic Model of Stairs in Building Collars,” Photogrammetrie, Fernerkundung, Geoinformation PFG, p. 415–428, 2007.
    [BibTeX] [PDF]

    The automated extraction of high resolution 3D building models from imagery and laser scanner data requires strong models for all features which are observable at a large scale. In this paper we give a semantic model of stairs. They play a prominent role in the transition from buildings to the surrounding terrain or infrastructure. We name the transition area between terrain and building collar, and the focus is on stairs in building collars. Simple and complex stairways are represented by UML class diagrams along with constraints reflecting semantic and functional aspects in OCL. A systematic derivation of an attribute grammar consisting of production and semantic rules from UML/OCL is presented. Finally, we show how hypotheses with comprehensive predictions may be derived from observations using mixed integer/real programming driven by grammar rules.

    @article{schmittwiken2007semantic,
      author   = {Schmittwilken, J\"org and Saatkamp, Jens and F\"orstner, Wolfgang and Kolbe, Thomas and Pl\"umer, Lutz},
      title    = {A Semantic Model of Stairs in Building Collars},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation PFG},
      year     = {2007},
      pages    = {415--428},
      abstract = {The automated extraction of high resolution 3D building models from imagery and laser scanner data requires strong models for all features which are observable at a large scale. In this paper we give a semantic model of stairs. They play a prominent role in the transition from buildings to the surrounding terrain or infrastructure. We name the transition area between terrain and building collar, and the focus is on stairs in building collars. Simple and complex stairways are represented by UML class diagrams along with constraints reflecting semantic and functional aspects in OCL. A systematic derivation of an attribute grammar consisting of production and semantic rules from UML/OCL is presented. Finally, we show how hypotheses with comprehensive predictions may be derived from observations using mixed integer/real programming driven by grammar rules.},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Schmittwiken2007Semantic.pdf},
    }

  • C. Schmitz, “Untersuchungen zur Genauigkeit der gleichzeitigen Lokalisierung und Kartierung aus monokularen Bildfolgen,” Diplomarbeit Master Thesis, 2007.
    [BibTeX]
    [none]
    @mastersthesis{schmitz2007untersuchungen,
      author   = {Schmitz, Cornelia},
      title    = {Untersuchungen zur Genauigkeit der gleichzeitigen Lokalisierung und Kartierung aus monokularen Bildfolgen},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {2007},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
      abstract = {[none]},
      city     = {Bonn},
    }

  • C. Stachniss, G. Grisetti, W. Burgard, and N. Roy, “Evaluation of Gaussian Proposal Distributions for Mapping with Rao-Blackwellized Particle Filters,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Diego, CA, USA, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2007a,
      author    = {Stachniss, C. and Grisetti, G. and Burgard, W. and Roy, N.},
      title     = {Evaluation of Gaussian Proposal Distributions for Mapping with Rao-Blackwellized Particle Filters},
      booktitle = iros,
      address   = {San Diego, CA, USA},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss07iros.pdf},
    }

  • C. Stachniss, G. Grisetti, O. Martínez-Mozos, and W. Burgard, “Efficiently Learning Metric and Topological Maps with Autonomous Service Robots,” it – Information Technology, vol. 49, iss. 4, p. 232–238, 2007.
    [BibTeX]
    [none]
    @article{stachniss2007,
      author    = {Stachniss, C. and Grisetti, G. and Mart\'{i}nez-Mozos, O. and Burgard, W.},
      title     = {Efficiently Learning Metric and Topological Maps with Autonomous Service Robots},
      journal   = {it -- Information Technology},
      year      = {2007},
      volume    = {49},
      number    = {4},
      pages     = {232--238},
      abstract  = {[none]},
      editor    = {Buss, M. and Lawitzki, G.},
      timestamp = {2014.04.24},
    }

  • B. Steder, G. Grisetti, S. Grzonka, C. Stachniss, A. Rottmann, and W. Burgard, “Learning Maps in 3D using Attitude and Noisy Vision Sensors,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), San Diego, CA, USA, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{steder2007,
      author    = {Steder, B. and Grisetti, G. and Grzonka, S. and Stachniss, C. and Rottmann, A. and Burgard, W.},
      title     = {Learning Maps in 3D using Attitude and Noisy Vision Sensors},
      booktitle = iros,
      address   = {San Diego, CA, USA},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/steder07iros.pdf},
    }

  • B. Steder, A. Rottmann, G. Grisetti, C. Stachniss, and W. Burgard, “Autonomous Navigation for Small Flying Vehicles,” , San Diego, CA, USA, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{steder,
      author    = {Steder, B. and Rottmann, A. and Grisetti, G. and Stachniss, C. and Burgard, W.},
      title     = {Autonomous Navigation for Small Flying Vehicles},
      booktitle = iroswsfly,
      address   = {San Diego, CA, USA},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.informatik.uni-freiburg.de/~steder/publications/steder07irosws.pdf},
    }

  • R. Steffen and C. Beder, “Recursive Estimation with Implicit Constraints,” in Proc. of the DAGM 2007, Heidelberg, 2007, p. 194–203. doi:10.1007/978-3-540-74936-3_20
    [BibTeX] [PDF]

    Recursive estimation or Kalman filtering usually relies on explicit model functions, that directly and explicitly describe the effect of the parameters on the observations. However, many problems in computer vision, including all those resulting in homogeneous equation systems, are easier described using implicit constraints between the observations and the parameters. By implicit we mean, that the constraints are given by equations, that are not easily solvable for the observation vector. We present a framework, that allows to incorporate such implicit constraints as measurement equations into a Kalman filter. The algorithm may be used as a black-box, simplifying the process of specifying suitable measurement equations for many problems. As a byproduct, the possibility of specifying model equations non-explicitly, some non-linearities may be avoided and better results can be achieved for certain problems.

    @InProceedings{steffen2007recursive,
    title = {Recursive Estimation with Implicit Constraints},
    author = {Steffen, Richard and Beder, Christian},
    booktitle = {Proc. of the DAGM 2007},
    year = {2007},
    address = {Heidelberg},
    editor = {Hamprecht, F. A. and Schn\"orr, C. and J\"ahne, B.},
    volume = {4713},
    pages = {194--203},
    publisher = {Springer},
    series = {LNCS},
    abstract = {Recursive estimation or Kalman filtering usually relies on explicit model functions, that directly and explicitly describe the effect of the parameters on the observations. However, many problems in computer vision, including all those resulting in homogeneous equation systems, are easier described using implicit constraints between the observations and the parameters. By implicit we mean, that the constraints are given by equations, that are not easily solvable for the observation vector. We present a framework, that allows to incorporate such implicit constraints as measurement equations into a Kalman filter. The algorithm may be used as a black-box, simplifying the process of specifying suitable measurement equations for many problems. As a byproduct, the possibility of specifying model equations non-explicitly, some non-linearities may be avoided and better results can be achieved for certain problems.},
    doi = {10.1007/978-3-540-74936-3_20},
    url = {https://www.ipb.uni-bonn.de/pdfs/Steffen2007Recursive.pdf},
    }

  • H. Strasdat, C. Stachniss, M. Bennewitz, and W. Burgard, “Visual Bearing-Only Simultaneous Localization and Mapping with Improved Feature Matching,” in Autonome Mobile Systeme, Kaiserslautern, Germany, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{strasdat2007,
      author    = {Strasdat, H. and Stachniss, C. and Bennewitz, M. and Burgard, W.},
      title     = {Visual Bearing-Only Simultaneous Localization and Mapping with Improved Feature Matching},
      booktitle = ams,
      address   = {Kaiserslautern, Germany},
      year      = {2007},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/strasdat07ams.pdf},
    }

  • B. Waske and J. A. Benediktsson, “Decision Fusion of Multitemporal SAR and Multispectral Imagery for Improved Land Cover Classification,” in ISPRS Mapping without the sun, 2007.
    [BibTeX]
    [none]
    @inproceedings{waske2007decision,
      author    = {Waske, Bj\"orn and Benediktsson, Jon Atli},
      title     = {Decision Fusion of Multitemporal SAR and Multispectral Imagery for Improved Land Cover Classification},
      booktitle = {ISPRS Mapping without the sun},
      year      = {2007},
      abstract  = {[none]},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • B. Waske and J. A. Benediktsson, “Fusion of support vector machines for classification of multisensor data,” IEEE Transactions on Geoscience and Remote Sensing, vol. 45, iss. 12, p. 3858–3866, 2007. doi:10.1109/TGRS.2007.898446
    [BibTeX]

    The classification of multisensor data sets, consisting of multitemporal synthetic aperture radar data and optical imagery, is addressed. The concept is based on the decision fusion of different outputs. Each data source is treated separately and classified by a support vector machine (SVM). Instead of fusing the final classification outputs (i.e., land cover classes), the original outputs of each SVM discriminant function are used in the subsequent fusion process. This fusion is performed by another SVM, which is trained on the a priori outputs. In addition, two voting schemes are applied to create the final classification results. The results are compared with well-known parametric and nonparametric classifier methods, i.e., decision trees, the maximum-likelihood classifier, and classifier ensembles. The proposed SVM-based fusion approach outperforms all other approaches and significantly improves the results of a single SVM, which is trained on the whole multisensor data set.

    @article{waske2007fusion,
    title = {Fusion of support vector machines for classification of multisensor data},
    author = {Waske, Bj\"orn and Benediktsson, Jon Atli},
    journal = {IEEE Transactions on Geoscience and Remote Sensing},
    year = {2007},
    month = dec,
    number = {12},
    pages = {3858--3866},
    volume = {45},
    abstract = {The classification of multisensor data sets, consisting of multitemporal synthetic aperture radar data and optical imagery, is addressed. The concept is based on the decision fusion of different outputs. Each data source is treated separately and classified by a support vector machine (SVM). Instead of fusing the final classification outputs (i.e., land cover classes), the original outputs of each SVM discriminant function are used in the subsequent fusion process. This fusion is performed by another SVM, which is trained on the a priori outputs. In addition, two voting schemes are applied to create the final classification results. The results are compared with well-known parametric and nonparametric classifier methods, i.e., decision trees, the maximum-likelihood classifier, and classifier ensembles. The proposed SVM-based fusion approach outperforms all other approaches and significantly improves the results of a single SVM, which is trained on the whole multisensor data set.},
    cl = {Hong Kong, PEOPLES R CHINA},
    ct = {4th International Workshop on Pattern Recognition in Remote Sensing},
    cy = {AUG 20, 2006},
    doi = {10.1109/TGRS.2007.898446},
    issn = {0196-2892},
    owner = {waske},
    pn = {Part 1},
    tc = {56},
    timestamp = {2012.09.04},
    ut = {WOS:000251339400002},
    z8 = {2},
    z9 = {58},
    zb = {6},
    }

  • B. Waske, M. Braun, and G. Menz, “A segment-based speckle filter using multisensoral remote sensing imagery,” IEEE Geoscience and Remote Sensing Letters, vol. 4, iss. 2, p. 231–235, 2007. doi:10.1109/LGRS.2006.888849
    [BibTeX]

    In the proposed approach, the well-known enhanced Lee filter is modified to allow the integration of feature outlines-previously extracted from segmented optical images. The filter is applied to several ENVISAT ASAR images that cover urban, agricultural, and forest areas during different plant phenological stages. The performance of this segment-based speckle filter is compared to those of other filters using ratio images, visual interpretation, and statistical indexes. The approach reduces the loss of radiometry and spatial information. It performs comparable to more complex methods and outperforms common techniques.

    @article{waske2007segment,
    title = {A segment-based speckle filter using multisensoral remote sensing imagery},
    author = {Waske, Bj\"orn and Braun, Matthias and Menz, Gunter},
    journal = {IEEE Geoscience and Remote Sensing Letters},
    year = {2007},
    month = apr,
    number = {2},
    pages = {231--235},
    volume = {4},
    abstract = {In the proposed approach, the well-known enhanced Lee filter is modified to allow the integration of feature outlines-previously extracted from segmented optical images. The filter is applied to several ENVISAT ASAR images that cover urban, agricultural, and forest areas during different plant phenological stages. The performance of this segment-based speckle filter is compared to those of other filters using ratio images, visual interpretation, and statistical indexes. The approach reduces the loss of radiometry and spatial information. It performs comparable to more complex methods and outperforms common techniques.},
    doi = {10.1109/LGRS.2006.888849},
    issn = {1545-598X},
    owner = {waske},
    tc = {1},
    timestamp = {2012.09.04},
    ut = {WOS:000246033900009},
    z8 = {0},
    z9 = {1},
    zb = {0},
    }

  • B. Waske, V. Heinzel, M. Braun, and G. Menz, “Random Forests for Classifying multi-temporal SAR Data,” in ESA’s ENVISAT Symposium, 2007.
    [BibTeX] [PDF]

    The accuracy of supervised land cover classifications depends on several factors like the chosen algorithm, adequate training data and the selection of features. In regard to multi-temporal remote sensing imagery statistical classifier are often not applicable. In the study presented here, a Random Forest was applied to a SAR data set, consisting of 15 acquisitions. A detailed accuracy assessment shows that the Random Forest significantly increases the efficiency of the single decision tree and can outperform other classifiers in terms of accuracy. A visual interpretation confirms the statistical accuracy assessment. The imagery is classified into more homogeneous regions and the noise is significantly decreased. The additional time needed for the generation of Random Forests is little and can be justified. It is still a lot faster than other state-of-the-art classifiers.

    @inproceedings{waske2007random,
      author    = {Waske, Bj\"orn and Heinzel, Vanessa and Braun, Matthias and Menz, Gunter},
      title     = {Random Forests for Classifying multi-temporal SAR Data},
      booktitle = {ESA's ENVISAT Symposium},
      year      = {2007},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Waske2007Random.pdf},
      abstract  = {The accuracy of supervised land cover classifications depends on several factors like the chosen algorithm, adequate training data and the selection of features. In regard to multi-temporal remote sensing imagery statistical classifier are often not applicable. In the study presented here, a Random Forest was applied to a SAR data set, consisting of 15 acquisitions. A detailed accuracy assessment shows that the Random Forest significantly increases the efficiency of the single decision tree and can outperform other classifiers in terms of accuracy. A visual interpretation confirms the statistical accuracy assessment. The imagery is classified into more homogeneous regions and the noise is significantly decreased. The additional time needed for the generation of Random Forests is little and can be justified. It is still a lot faster than other state-of-the-art classifiers.},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • B. Waske, G. Menz, and J. A. Benediktsson, “Fusion of support vector machines for classifying SAR and multispectral imagery from agricultural areas,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2007. doi:10.1109/IGARSS.2007.4423945
    [BibTeX]

    A concept for classifying multisensor data sets, consisting of multispectral and SAR imagery is introduced. Each data source is separately classified by a support vector machine (SVM). In a decision fusion the outputs of the preliminary SVMs are used to determine the final class memberships. This fusion is performed by another SVM as well as two common voting schemes. The results are compared with well-known parametric and nonparametric classifier methods. The proposed SVM-based fusion approach outperforms all other concepts and significantly improves the results of a single SVM that is trained on the whole multisensor data set.

    @inproceedings{waske2007fusiona,
      author    = {Waske, Bj\"orn and Menz, Gunter and Benediktsson, Jon Atli},
      title     = {Fusion of support vector machines for classifying SAR and multispectral imagery from agricultural areas},
      booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
      year      = {2007},
      doi       = {10.1109/IGARSS.2007.4423945},
      abstract  = {A concept for classifying multisensor data sets, consisting of multispectral and SAR imagery is introduced. Each data source is separately classified by a support vector machine (SVM). In a decision fusion the outputs of the preliminary SVMs are used to determine the final class memberships. This fusion is performed by another SVM as well as two common voting schemes. The results are compared with well-known parametric and nonparametric classifier methods. The proposed SVM-based fusion approach outperforms all other concepts and significantly improves the results of a single SVM that is trained on the whole multisensor data set.},
      keywords  = {SAR imagery classification;SVM-based fusion approach;Support Vector Machines;agricultural areas;common voting schemes;multisensor data sets classification;multispectral imagery classification;nonparametric classifier method;parametric classifier method;agriculture;image classification;sensor fusion;support vector machines;synthetic aperture radar;},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • S. Wenzel, “Spiegelung und Zuordnung der SIFT-Feature Deskriptoren für die Detektion von Symmetrien und wiederholten Strukturen in Bildern,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2007-04, 2007.
    [BibTeX] [PDF]

    This report describes the details for mirroring the descriptors of the SIFT-features. We show how the mirrored versions are derived by simply resorting the descriptor elements. Furthermore, we describe the matching of features within an image. The peculiarity of this task is the search for more than one – the best – match within a single image. The presented methods are based on the work of (Wenzel2006,Detektion). After the introduction the functionality of the SIFT-feature detector is drafted and the development of the descriptors is described in detail. The following sections describe the details of mirroring and matching the features. Dieser Bericht geht auf die Details zur Spiegelung von SIFT-Feature Deskriptoren ein. Es wird gezeigt, wie durch einfaches Umsortieren der Elemente des Feature Deskriptors gespiegelte Versionen der Deskriptoren erlangt werden können. Des Weiteren wird erläutert, wie Features innerhalb eines Bildes zugeordnet werden können. Die Besonderheit dieser Aufgabenstellung liegt in der gesuchten Zuordnung nicht eines – des besten – Matches, sondern in der Zuordnung aller Matches in einem Bild. Die vorgestellten Methoden basieren auf (Wenzel2006,Detektion).

    @techreport{wenzel2007spiegelung,
      author      = {Wenzel, Susanne},
      title       = {Spiegelung und Zuordnung der SIFT-Feature Deskriptoren f\"ur die Detektion von Symmetrien und wiederholten Strukturen in Bildern},
      institution = {Department of Photogrammetry, University of Bonn},
      number      = {TR-IGG-P-2007-04},
      month       = aug,
      year        = {2007},
      abstract    = {This report describes the details for mirroring the descriptors of the SIFT-features. We show how the mirrored versions are derived by simply resorting the descriptor elements. Furthermore, we describe the matching of features within an image. The peculiarity of this task is the search for more than one - the best - match within a single image. The presented methods are based on the work of (Wenzel2006,Detektion). After the introduction the functionality of the SIFT-feature detector is drafted and the development of the descriptors is described in detail. The following sections describe the details of mirroring and matching the features. Dieser Bericht geht auf die Details zur Spiegelung von SIFT-Feature Deskriptoren ein. Es wird gezeigt, wie durch einfaches Umsortieren der Elemente des Feature Deskriptors gespiegelte Versionen der Deskriptoren erlangt werden k\"onnen. Des Weiteren wird erl\"autert, wie Features innerhalb eines Bildes zugeordnet werden k\"onnen. Die Besonderheit dieser Aufgabenstellung liegt in der gesuchten Zuordnung nicht eines - des besten - Matches, sondern in der Zuordnung aller Matches in einem Bild. Die vorgestellten Methoden basieren auf (Wenzel2006,Detektion).},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2007Spiegelung.pdf},
    }

  • S. Wenzel, M. Drauschke, and W. Förstner, “Detektion wiederholter und symmetrischer Strukturen in Fassadenbildern,” in Publikationen der DGPF: Von der Medizintechnik bis zur Planetenforschung – Photogrammetrie und Fernerkundung für das 21. Jahrhundert, Muttenz, Basel, 2007, pp. 119-126.
    [BibTeX] [PDF]

    Regelmäßige Strukturen und Symmetrien kennzeichnen viele Gebäudefassaden oder Objekte im Umfeld von Gebäuden. Für die automatisierte Bildinterpretation weisen diese Strukturen auf künstliche Objekte hin, führen aber auch zu Schwierigkeiten bei klassischen Bildzuordnungsverfahren. Die Suche und Gruppierung zusammengehöriger Merkmale kann daher sowohl zur Identifikation künstlicher Objekte als auch zur Verbesserung von Zuordnungsverfahren dienen. Für die Analyse von entzerrten Fassadenaufnahmen haben wir das Verfahren von [LOY 2006] zur Detektion symmetrischer Bildstrukturen zu einem Verfahren zur Detektion verschiedener, sich wiederholender Bildstrukturen erweitert und aus den detektierten wiederholten Objekten eine minimale Beschreibung der Struktur der Fassadenelemente in Form von achsenparallelen Basiselementen abgeleitet.

    @inproceedings{wenzel2007detektion,
      author    = {Wenzel, Susanne and Drauschke, Martin and F\"orstner, Wolfgang},
      title     = {Detektion wiederholter und symmetrischer Strukturen in Fassadenbildern},
      booktitle = {Publikationen der DGPF: Von der Medizintechnik bis zur Planetenforschung - Photogrammetrie und Fernerkundung f\"ur das 21. Jahrhundert},
      editor    = {Seyfert, Eckhardt},
      publisher = {DGPF},
      volume    = {16},
      pages     = {119--126},
      address   = {Muttenz, Basel},
      month     = jun,
      year      = {2007},
      abstract  = {Regelm\"a{\ss}ige Strukturen und Symmetrien kennzeichnen viele Geb\"audefassaden oder Objekte im Umfeld von Geb\"auden. F\"ur die automatisierte Bildinterpretation weisen diese Strukturen auf k\"unstliche Objekte hin, f\"uhren aber auch zu Schwierigkeiten bei klassischen Bildzuordnungsverfahren. Die Suche und Gruppierung zusammengeh\"origer Merkmale kann daher sowohl zur Identifikation k\"unstlicher Objekte als auch zur Verbesserung von Zuordnungsverfahren dienen. F\"ur die Analyse von entzerrten Fassadenaufnahmen haben wir das Verfahren von [LOY 2006] zur Detektion symmetrischer Bildstrukturen zu einem Verfahren zur Detektion verschiedener, sich wiederholender Bildstrukturen erweitert und aus den detektierten wiederholten Objekten eine minimale Beschreibung der Struktur der Fassadenelemente in Form von achsenparallelen Basiselementen abgeleitet.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2007Detektion.pdf},
    }

  • S. Wenzel, M. Drauschke, and W. Förstner, “Detection and Description of Repeated Structures in Rectified Facade Images,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 7, p. 481–490, 2007.
    [BibTeX] [PDF]

    We present a method for detecting repeated structures, which is applied on facade images for describing the regularity of their windows. Our approach finds and explicitly represents repetitive structures and thus gives initial representation of facades. No explicit notion of a window is used, thus the method also appears to be able to identify other man made structures, e.g. paths with regular tiles. A method for detection of dominant symmetries is adapted for detection of multiple repeated structures. A compact description of repetitions is derived from translations detected in an image by a heuristic search method and the model selection criterion of the minimum description length.

    @article{wenzel2007detection,
      author   = {Wenzel, Susanne and Drauschke, Martin and F\"orstner, Wolfgang},
      title    = {Detection and Description of Repeated Structures in Rectified Facade Images},
      journal  = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
      volume   = {7},
      pages    = {481--490},
      year     = {2007},
      abstract = {We present a method for detecting repeated structures, which is applied on facade images for describing the regularity of their windows. Our approach finds and explicitly represents repetitive structures and thus gives initial representation of facades. No explicit notion of a window is used, thus the method also appears to be able to identify other man made structures, e.g. paths with regular tiles. A method for detection of dominant symmetries is adapted for detection of multiple repeated structures. A compact description of repetitions is derived from translations detected in an image by a heuristic search method and the model selection criterion of the minimum description length.},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2007Detectiona.pdf},
    }

  • S. Wenzel, M. Drauschke, and W. Förstner, “Detection of repeated structures in facade images,” in Proc. of the OGRW-7-2007, 7th Open German/Russian Workshop on Pattern Recognition and Image Understanding. August 20-23, 2007. Ettlingen, Germany, 2007. doi:10.1134/S1054661808030073
    [BibTeX] [PDF]

    We present a method for detecting repeated structures, which is applied on facade images for describing the regularity of their windows. Our approach finds and explicitly represents repetitive structures and thus gives initial representation of facades. No explicit notion of a window is used, thus the method also appears to be able to identify other man made structures, e.g. paths with regular tiles. A method for detection of dominant symmetries is adapted for detection of multiply repeated structures. A compact description of the repetitions is derived from the detected translations in the image by a heuristic search method and the criterion of the minimum description length.

    @inproceedings{wenzel2007detectiona,
      author    = {Wenzel, Susanne and Drauschke, Martin and F\"orstner, Wolfgang},
      title     = {Detection of repeated structures in facade images},
      booktitle = {Proc. of the OGRW-7-2007, 7th Open German/Russian Workshop on Pattern Recognition and Image Understanding. August 20-23, 2007. Ettlingen, Germany},
      year      = {2007},
      doi       = {10.1134/S1054661808030073},
      abstract  = {We present a method for detecting repeated structures, which is applied on facade images for describing the regularity of their windows. Our approach finds and explicitly represents repetitive structures and thus gives initial representation of facades. No explicit notion of a window is used, thus the method also appears to be able to identify other man made structures, e.g. paths with regular tiles. A method for detection of dominant symmetries is adapted for detection of multiply repeated structures. A compact description of the repetitions is derived from the detected translations in the image by a heuristic search method and the criterion of the minimum description length.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2007Detection.pdf},
    }

  • K. M. Wurm, C. Stachniss, G. Grisetti, and W. Burgard, “Improved Simultaneous Localization and Mapping using a Dual Representation of the Environment,” in Proc. of the European Conf. on Mobile Robots (ECMR), Freiburg, Germany, 2007.
    [BibTeX] [PDF]
    [none]
    @inproceedings{wurm2007,
      author    = {Wurm, K.M. and Stachniss, C. and Grisetti, G. and Burgard, W.},
      title     = {Improved Simultaneous Localization and Mapping using a Dual Representation of the Environment},
      booktitle = ecmr,
      year      = {2007},
      address   = {Freiburg, Germany},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/wurm07ecmr.pdf},
    }

  • L. Zug, “Untersuchungen zur Genauigkeit der automatischen Punkt- und Orientierungsbestimmung aus extrem großmaßstäbigen Luftbildern,” Master Thesis, 2007.
    [BibTeX]

    Die vorliegende Arbeit untersucht die durch ein vollautomatisches Orientierungsverfahren (Läbe & Förstner) erreichbaren Genauigkeiten eines Bildverbanders anhand der Genauigkeit rekonstruierter Objektpunkt-koordinaten. Für diese liegen aus einer unabhängigen terrestrischen Messung genaue Referenzkoordinaten vor. Zum Vergleich der Wiedersprüche zwischen Referenzkoordinaten und rekonstruierter Koordinaten in einem photogrammetrischen Modell sollte in der Arbeit eine Koordinatentransformation basierend auf der K- und S-Transformation erstellt werden.

    @mastersthesis{zug2007untersuchungen,
      author   = {Zug, Laura},
      title    = {Untersuchungen zur Genauigkeit der automatischen Punkt- und Orientierungsbestimmung aus extrem gro{\ss}ma{\ss}st\"abigen Luftbildern},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2007},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
      abstract = {Die vorliegende Arbeit untersucht die durch ein vollautomatisches Orientierungsverfahren (L\"abe & F\"orstner) erreichbaren Genauigkeiten eines Bildverbanders anhand der Genauigkeit rekonstruierter Objektpunkt-koordinaten. F\"ur diese liegen aus einer unabh\"angigen terrestrischen Messung genaue Referenzkoordinaten vor. Zum Vergleich der Wiederspr\"uche zwischen Referenzkoordinaten und rekonstruierter Koordinaten in einem photogrammetrischen Modell sollte in der Arbeit eine Koordinatentransformation basierend auf der K- und S-Transformation erstellt werden.},
      city     = {Bonn},
    }

2006

  • C. Beder and W. Förstner, “Direct Solutions for Computing Cylinders from Minimal Sets of 3D Points,” in Proc. of the European Conf. on Computer Vision, Graz, Austria, 2006, p. 135–146. doi:10.1007/11744023_11
    [BibTeX] [PDF]

    Efficient direct solutions for the determination of a cylinder from points are presented. The solutions range from the well known direct solution of a quadric to the minimal solution of a cylinder with five points. In contrast to the approach of G. Roth and M. D. Levine (1990), who used polynomial bases for representing the geometric entities, we use algebraic constraints on the quadric representing the cylinder. The solutions for six to eight points directly determine all the cylinder parameters in one step: (1) The eight-point-solution, similar to the estimation of the fundamental matrix, requires to solve for the roots of a 3rd-order-polynomial. (2) The seven-point-solution, similar to the six-point-solution for the relative orientation by J. Philip (1996), yields a linear equation system. (3) The six-point-solution, similar to the five-point-solution for the relative orientation by D. Nister (2003), yields a ten-by-ten eigenvalue problem. The new minimal five-point-solution first determines the direction and then the position and the radius of the cylinder. The search for the zeros of the resulting 6th order polynomials is efficiently realized using 2D-Bernstein polynomials. Also direct solutions for the special cases with the axes of the cylinder parallel to a coordinate plane or axis are given. The method is used to find cylinders in range data of an industrial site.

    @inproceedings{beder2006direct,
      author    = {Beder, Christian and F\"orstner, Wolfgang},
      title     = {Direct Solutions for Computing Cylinders from Minimal Sets of {3D} Points},
      booktitle = {Proc. of the European Conf. on Computer Vision},
      editor    = {A. Leonardis and H. Bischof and A. Pinz},
      series    = {LNCS},
      volume    = {3951},
      pages     = {135--146},
      publisher = {Springer},
      address   = {Graz, Austria},
      year      = {2006},
      doi       = {10.1007/11744023_11},
      abstract  = {Efficient direct solutions for the determination of a cylinder from points are presented. The solutions range from the well known direct solution of a quadric to the minimal solution of a cylinder with five points. In contrast to the approach of G. Roth and M. D. Levine (1990), who used polynomial bases for representing the geometric entities, we use algebraic constraints on the quadric representing the cylinder. The solutions for six to eight points directly determine all the cylinder parameters in one step: (1) The eight-point-solution, similar to the estimation of the fundamental matrix, requires to solve for the roots of a 3rd-order-polynomial. (2) The seven-point-solution, similar to the six-point-solution for the relative orientation by J. Philip (1996), yields a linear equation system. (3) The six-point-solution, similar to the five-point-solution for the relative orientation by D. Nister (2003), yields a ten-by-ten eigenvalue problem. The new minimal five-point-solution first determines the direction and then the position and the radius of the cylinder. The search for the zeros of the resulting 6th order polynomials is efficiently realized using 2D-Bernstein polynomials. Also direct solutions for the special cases with the axes of the cylinder parallel to a coordinate plane or axis are given. The method is used to find cylinders in range data of an industrial site.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Beder2006Direct.pdf},
    }

  • C. Beder and W. Förstner, “Direkte Bestimmung von Zylindern aus 3D-Punkten ohne Nutzung von Oberflächennormalen,” in Photogrammetrie – Laserscanning – Optische 3D-Messtechnik, Oldenburg, 2006, p. 206–213.
    [BibTeX] [PDF]

    Die automatische Extraktion von Zylindern aus 3D-Punktwolken ist von zentraler Bedeutung bei der Auswertung von Laserscannerdaten insbesondere bei Industrieanlagen. Das robuste Schätzverfahren RANSAC benötigt direkte Lösungen aus so wenig Datenpunkten wie möglich, um effizient zu arbeiten. Wir werden die algebraischen Bedingungen, die quadratische Formen erfüllen müssen, um einen Zylinder darzustellen, analysieren und verschiedene Verfahren für die Lösung dieses Problems vorstellen. Insbesondere werden wir eine minimale Lösung mit nur fünf 3D Punkten präsentieren. Anders als andere Ansätze benötigen wir keine Oberflächennormalen, deren Bestimmung im Allgemeinen schwierig ist.

    @inproceedings{beder2006direkte,
      author    = {Beder, Christian and F\"orstner, Wolfgang},
      title     = {Direkte Bestimmung von Zylindern aus 3D-Punkten ohne Nutzung von Oberfl\"achennormalen},
      booktitle = {Photogrammetrie - Laserscanning - Optische 3D-Messtechnik},
      editor    = {Thomas Luhmann and Christina M\"uller},
      pages     = {206--213},
      publisher = {Herbert Wichmann Verlag},
      address   = {Oldenburg},
      year      = {2006},
      abstract  = {Die automatische Extraktion von Zylindern aus 3D-Punktwolken ist von zentraler Bedeutung bei der Auswertung von Laserscannerdaten insbesondere bei Industrieanlagen. Das robuste Sch\"atzverfahren RANSAC ben\"otigt direkte L\"osungen aus so wenig Datenpunkten wie m\"oglich, um effizient zu arbeiten. Wir werden die algebraischen Bedingungen, die quadratische Formen erf\"ullen m\"ussen, um einen Zylinder darzustellen, analysieren und verschiedene Verfahren f\"ur die L\"osung dieses Problems vorstellen. Insbesondere werden wir eine minimale L\"osung mit nur f\"unf 3D Punkten pr\"asentieren. Anders als andere Ans\"atze ben\"otigen wir keine Oberfl\"achennormalen, deren Bestimmung im Allgemeinen schwierig ist.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Beder2006Direkte.pdf},
    }

  • C. Beder and R. Steffen, “Determining an initial image pair for fixing the scale of a 3d reconstruction from an image sequence,” in Pattern Recognition, Berlin, 2006, p. 657–666. doi:10.1007/11861898_66
    [BibTeX] [PDF]

    Algorithms for metric 3d reconstruction of scenes from calibrated image sequences always require an initialization phase for fixing the scale of the reconstruction. Usually this is done by selecting two frames from the sequence and fixing the length of their base-line. In this paper a quality measure, that is based on the uncertainty of the reconstructed scene points, for the selection of such a stable image pair is proposed. Based on this quality measure a fully automatic initialization phase for simultaneous localization and mapping algorithms is derived. The proposed algorithm runs in real-time and some results for synthetic as well as real image sequences are shown.

    @inproceedings{beder2006determining,
      author    = {Beder, Christian and Steffen, Richard},
      title     = {Determining an initial image pair for fixing the scale of a 3d reconstruction from an image sequence},
      booktitle = {Pattern Recognition},
      editor    = {K. Franke and K.-R. M\"uller and B. Nickolay and R. Sch\"afer},
      series    = {LNCS},
      volume    = {4174},
      pages     = {657--666},
      publisher = {Springer},
      address   = {Berlin},
      year      = {2006},
      doi       = {10.1007/11861898_66},
      abstract  = {Algorithms for metric 3d reconstruction of scenes from calibrated image sequences always require an initialization phase for fixing the scale of the reconstruction. Usually this is done by selecting two frames from the sequence and fixing the length of their base-line. In this paper a quality measure, that is based on the uncertainty of the reconstructed scene points, for the selection of such a stable image pair is proposed. Based on this quality measure a fully automatic initialization phase for simultaneous localization and mapping algorithms is derived. The proposed algorithm runs in real-time and some results for synthetic as well as real image sequences are shown.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Beder2006Determining.pdf},
    }

  • M. Bennewitz, C. Stachniss, W. Burgard, and S. Behnke, “Metric Localization with Scale-Invariant Visual Features using a Single Perspective Camera,” in European Robotics Symposium 2006, 2006, p. 143–157.
    [BibTeX] [PDF]
    [none]
    @inproceedings{bennewitz2006,
      author    = {Bennewitz, M. and Stachniss, C. and Burgard, W. and Behnke, S.},
      title     = {Metric Localization with Scale-Invariant Visual Features using a Single Perspective Camera},
      booktitle = {European Robotics Symposium 2006},
      editor    = {H.I. Christensen},
      series    = springerstaradvanced,
      volume    = {22},
      pages     = {143--157},
      publisher = {Springer-Verlag Berlin Heidelberg, Germany},
      year      = {2006},
      isbn      = {3-540-32688-X},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/bennewitz06euros.pdf},
    }

  • T. Dickscheid, “Markerlose Selbstlokalisation durch Fusion von Sensordaten,” Diplomarbeit Master Thesis, 2006.
    [BibTeX]
    [none]
    @mastersthesis{dickscheid2006markerlose,
      author   = {Dickscheid, Timo},
      title    = {Markerlose Selbstlokalisation durch Fusion von Sensordaten},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2006},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Dietrich Paulus (Universit\"at Koblenz), Dr.-Ing. Chunrong Yuan (Fraunhofer FIT)},
      abstract = {[none]},
      city     = {Bonn},
    }

  • M. Drauschke, “Automatisches Dodging von Luftbildern,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2006-01, 2006.
    [BibTeX] [PDF]

    Das Problem stellt sich wie folgt dar: Die Luftbilder wurden mit einem Vexcel-Scanner digitalisiert und als 16-Bit-Bilder abgespeichert. Die Bilder sollen automatisch nachbereitet werden.

    @techreport{drauschke2006automatisches,
      author      = {Drauschke, Martin},
      title       = {Automatisches Dodging von Luftbildern},
      institution = {Department of Photogrammetry, University of Bonn},
      number      = {TR-IGG-P-2006-01},
      year        = {2006},
      abstract    = {Das Problem stellt sich wie folgt dar: Die Luftbilder wurden mit einem Vexcel-Scanner digitalisiert und als 16-Bit-Bilder abgespeichert. Die Bilder sollen automatisch nachbereitet werden.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2006Automatisches.pdf},
    }

  • M. Drauschke, H. Schuster, and W. Förstner, “Detectibility of Buildings in Aerial Images over Scale Space,” in Symposium of ISPRS Commission III: Photogrammetric Computer Vision, Bonn, 2006, p. 7–12.
    [BibTeX] [PDF]

    Automatic scene interpretation of aerial images is a major purpose of photogrammetry. Therefore, we want to improve building detection by exploring the “life-time” of stable and relevant image features in scale space. We use watersheds for feature extraction to gain a topologically consistent map. We will show that characteristic features for building detection can be found in all considered scales, so that no optimal scale can be selected for building recognition. Nevertheless, many of these features “live” in a wide scale interval, so that a combination of a small number of scales can be used for automatic building detection.

    @inproceedings{drauschke2006detectibility,
      author       = {Drauschke, Martin and Schuster, Hanns-Florian and F\"orstner, Wolfgang},
      title        = {Detectibility of Buildings in Aerial Images over Scale Space},
      booktitle    = {Symposium of ISPRS Commission III: Photogrammetric Computer Vision},
      editor       = {Wolfgang F\"orstner and Richard Steffen},
      organization = {ISPRS},
      publisher    = {ISPRS},
      volume       = {XXXVI},
      number       = {Part 3},
      pages        = {7--12},
      address      = {Bonn},
      month        = sep,
      year         = {2006},
      keywords     = {Building Detection, Scale Space, Feature Extraction, Stable Regions},
      abstract     = {Automatic scene interpretation of aerial images is a major purpose of photogrammetry. Therefore, we want to improve building detection by exploring the "life-time" of stable and relevant image features in scale space. We use watersheds for feature extraction to gain a topologically consistent map. We will show that characteristic features for building detection can be found in all considered scales, so that no optimal scale can be selected for building recognition. Nevertheless, many of these features "live" in a wide scale interval, so that a combination of a small number of scales can be used for automatic building detection.},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2006Detectibility.pdf},
    }

  • M. Drauschke, H. Schuster, and W. Förstner, “Stabilität von Regionen im Skalenraum,” in Publikationen der DGPF: Geoinformatik und Erdbeobachtung, Berlin, 2006, p. 29–36.
    [BibTeX] [PDF]

    Für die automatische Erfassung von Gebäuden aus Luftbildern ist es nützlich, Bildstrukturen im Skalenraum, d. h. über mehrere Auflösungsstufen zu beobachten, um für die Objekterkennung hinderliche Details ausblenden zu können. Große Bedeutung messen wir dabei den homogenen Regionen sowie deren Nachbarschaften zu. Regionen betrachten wir als stabil, wenn sie über einen mehrere Skalenstufen invariant bleiben. Sie haben spezielle Eigenschaften: Beim Vergrössern der Skala verschmelzen benachbarte Regionen, wobei eine Region immer vollständig in der anderen aufgeht. Diese speziellen Eigenschaft erleichtert das Bestimmen der Nachbarschaften in einer vorgegeben Skala, denn der Regionennachbarschaftsgraph (RNG) muss nur einmal auf der untersten Ebene des Skalenraums berechnet werden. Die RNGs in den anderen Ebenen können leicht aus der untersten Ebene berechnet werden.

    @inproceedings{drauschke2006stabilitat,
      author    = {Drauschke, Martin and Schuster, Hanns-Florian and F\"orstner, Wolfgang},
      title     = {Stabilit\"at von Regionen im Skalenraum},
      booktitle = {Publikationen der DGPF: Geoinformatik und Erdbeobachtung},
      editor    = {Eckhardt Seyfert},
      publisher = {DGPF},
      volume    = {15},
      pages     = {29--36},
      address   = {Berlin},
      month     = sep,
      year      = {2006},
      abstract  = {F\"ur die automatische Erfassung von Geb\"auden aus Luftbildern ist es n\"utzlich, Bildstrukturen im Skalenraum, d. h. \"uber mehrere Aufl\"osungsstufen zu beobachten, um f\"ur die Objekterkennung hinderliche Details ausblenden zu k\"onnen. Gro{\ss}e Bedeutung messen wir dabei den homogenen Regionen sowie deren Nachbarschaften zu. Regionen betrachten wir als stabil, wenn sie \"uber einen mehrere Skalenstufen invariant bleiben. Sie haben spezielle Eigenschaften: Beim Vergr\"ossern der Skala verschmelzen benachbarte Regionen, wobei eine Region immer vollst\"andig in der anderen aufgeht. Diese speziellen Eigenschaft erleichtert das Bestimmen der Nachbarschaften in einer vorgegeben Skala, denn der Regionennachbarschaftsgraph (RNG) muss nur einmal auf der untersten Ebene des Skalenraums berechnet werden. Die RNGs in den anderen Ebenen k\"onnen leicht aus der untersten Ebene berechnet werden.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Drauschke2006Stabilitaet.pdf},
    }

  • A. Gil, O. Reinoso, O. Martínez-Mozos, C. Stachniss, and W. Burgard, “Improving Data Association in Vision-based SLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Beijing, China, 2006.
    [BibTeX]
    [none]
    @inproceedings{gil2006,
      author    = {Gil, A. and Reinoso, O. and Mart\'{i}nez-Mozos, O. and Stachniss, C. and Burgard, W.},
      title     = {Improving Data Association in Vision-based {SLAM}},
      booktitle = iros,
      year      = {2006},
      address   = {Beijing, China},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

  • G. Grisetti, G. D. Tipaldi, C. Stachniss, W. Burgard, and D. Nardi, “Speeding-Up Rao-Blackwellized SLAM,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Orlando, FL, USA, 2006, p. 442–447.
    [BibTeX] [PDF]
    [none]
    @inproceedings{grisetti2006,
      author    = {Grisetti, G. and Tipaldi, G.D. and Stachniss, C. and Burgard, W. and Nardi, D.},
      title     = {Speeding-Up Rao-Blackwellized {SLAM}},
      booktitle = icra,
      year      = {2006},
      address   = {Orlando, FL, USA},
      pages     = {442--447},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti06icra.pdf},
    }

  • H. Hellwich, “Bestimmung der Eigenbewegung anhand einer monokularen Bildfolge,” Diplomarbeit Master Thesis, 2006.
    [BibTeX]
    [none]
    @mastersthesis{hellwich2006bestimmung,
      author   = {Hellwich, Hendrik},
      title    = {Bestimmung der Eigenbewegung anhand einer monokularen Bildfolge},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {2006},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Richard Steffen},
      abstract = {[none]},
      city     = {Bonn},
    }

  • A. Kesting, “Bild-basierte Baumkronenmodellierung mit Kugelflächenfunktionen,” Diplomarbeit Master Thesis, 2006.
    [BibTeX]
    [none]
    @mastersthesis{kesting2006bild,
      author   = {Kesting, Arne},
      title    = {Bild-basierte Baumkronenmodellierung mit Kugelfl\"achenfunktionen},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2006},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Christian Beder},
      abstract = {[none]},
      city     = {Bonn},
    }

  • T. Läbe and W. Förstner, “Automatic Relative Orientation of Images,” in Proc. of the 5th Turkish-German Joint Geodetic Days, Berlin, 2006.
    [BibTeX] [PDF]

    This paper presents a new full automatic approach for the relative orientation of several digital images taken with a calibrated camera. This approach uses new algorithms for feature extraction and relative orientation developed in the last few years. There is no need for special markers in the scene nor for approximate values of the orientation data. We use the point operator developed by D. G. Lowe (2004), which extracts points with scale- and rotation-invariant descriptors (SIFT-features). These descriptors allow a successful matching of image points even when dealing with highly convergent or rotated images. The approach consists of the following steps: After extracting image points on all images a matching between every image pair is calculated using the SIFT parameters only. No prior information about the pose of the images or the overlapping parts of the images is used. For every image pair a relative orientation is computed with the help of a RANSAC procedure. Here we use the new 5-point algorithm from D. Nister (2004). Out of this set of orientations approximate values for the orientation parameters and the object coordinates are calculated by computing the relative scales and transforming the models into a common coordinate system. Several tests are made in order to get a reliable input for the currently final step: a bundle block adjustment. The paper discusses the practical impacts of the used algorithms. Examples of different indoor- and outdoor-scenes including a data set of oblique images taken from a helicopter are presented and the results of the approach applied to these data sets are evaluated. These results show that the approach can be used for a wide range of scenes with different types of the image geometry and taken with different types of cameras including inexpensive consumer cameras. In particular we investigate in the robustness of the algorithms, e. g. in geometric tests on image triplets. 
Further developments like the use of image pyramids with a modified matching are discussed in the outlook. Literature: David G. Lowe, Distinctive image features from scale-invariant keypoints, International Journal of Computer Vision, 60, 2 (2004), pp. 91-110. D. Nister, An efficient solution to the five-point relative pose problem, IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 26(6):756-770, June 2004.

    @InProceedings{labe2006automatic,
    title = {Automatic Relative Orientation of Images},
    author = {L{\"a}be, Thomas and F{\"o}rstner, Wolfgang},
    booktitle = {Proc. of the 5th Turkish-German Joint Geodetic Days},
    year = {2006},
    address = {Berlin},
    abstract = {This paper presents a new full automatic approach for the relative orientation of several digital images taken with a calibrated camera. This approach uses new algorithms for feature extraction and relative orientation developed in the last few years. There is no need for special markers in the scene nor for approximate values of the orientation data. We use the point operator developed by D. G. Lowe (2004), which extracts points with scale- and rotation-invariant descriptors (SIFT-features). These descriptors allow a successful matching of image points even when dealing with highly convergent or rotated images. The approach consists of the following steps: After extracting image points on all images a matching between every image pair is calculated using the SIFT parameters only. No prior information about the pose of the images or the overlapping parts of the images is used. For every image pair a relative orientation is computed with the help of a RANSAC procedure. Here we use the new 5-point algorithm from D. Nister (2004). Out of this set of orientations approximate values for the orientation parameters and the object coordinates are calculated by computing the relative scales and transforming the models into a common coordinate system. Several tests are made in order to get a reliable input for the currently final step: a bundle block adjustment. The paper discusses the practical impacts of the used algorithms. Examples of different indoor- and outdoor-scenes including a data set of oblique images taken from a helicopter are presented and the results of the approach applied to these data sets are evaluated. These results show that the approach can be used for a wide range of scenes with different types of the image geometry and taken with different types of cameras including inexpensive consumer cameras. In particular we investigate in the robustness of the algorithms, e. g. in geometric tests on image triplets. 
Further developments like the use of image pyramids with a modified matching are discussed in the outlook. Literature: David G. Lowe, Distinctive image features from scale-invariant keypoints, International Journal of Computer Vision, 60, 2 (2004), pp. 91-110. D. Nister, An efficient solution to the five-point relative pose problem, IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 26(6):756-770, June 2004.},
    city = {Bonn},
    proceeding = {Proc. of the 5th Turkish-German Joint Geodetic Days},
    url = {https://www.ipb.uni-bonn.de/pdfs/Labe2006Automatic.pdf},
    }

  • P. Lamon, C. Stachniss, R. Triebel, P. Pfaff, C. Plagemann, G. Grisetti, S. Kolski, W. Burgard, and R. Siegwart, “Mapping with an Autonomous Car,” in iroswsnav, Beijing, China, 2006.
    [BibTeX] [PDF]
    [none]
    @inproceedings{lamon2006,
      author    = {Lamon, P. and Stachniss, C. and Triebel, R. and Pfaff, P. and Plagemann, C. and Grisetti, G. and Kolski, S. and Burgard, W. and Siegwart, R.},
      title     = {Mapping with an Autonomous Car},
      booktitle = iroswsnav,
      year      = {2006},
      address   = {Beijing, China},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/lamon06iros.pdf},
    }

  • D. Meier, C. Stachniss, and W. Burgard, “Cooperative Exploration With Multiple Robots Using Low Bandwidth Communication,” in Informationsfusion in der Mess- und Sensortechnik, 2006, p. 145–157.
    [BibTeX] [PDF]
    [none]
    @InProceedings{meier2006,
    title = {Cooperative Exploration With Multiple Robots Using Low Bandwidth Communication},
    author = {Meier, D. and Stachniss, C. and Burgard, W.},
    booktitle = {Informationsfusion in der Mess- und Sensortechnik},
    year = {2006},
    editor = {Beyerer, J. and Puente Le{\'o}n, F. and Sommer, K.-D.},
    pages = {145--157},
    abstract = {[none]},
    isbn = {3-86644-053-7},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/meier06sensor.pdf},
    }

  • C. Plagemann, C. Stachniss, and W. Burgard, “Efficient Failure Detection for Mobile Robots using Mixed-Abstraction Particle Filters,” in European Robotics Symposium 2006, 2006, p. 93–107.
    [BibTeX] [PDF]
    [none]
    @InProceedings{plagemann2006,
    title = {Efficient Failure Detection for Mobile Robots using Mixed-Abstraction Particle Filters},
    author = {Plagemann, C. and Stachniss, C. and Burgard, W.},
    booktitle = {European Robotics Symposium 2006},
    year = {2006},
    editor = {Christiensen, H. I.},
    pages = {93--107},
    publisher = {Springer-Verlag Berlin Heidelberg, Germany},
    series = springerstaradvanced,
    volume = {22},
    abstract = {[none]},
    isbn = {3-540-32688-X},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/plagemann06euros.pdf},
    }

  • D. Sonntag, S. Stachniss-Carp, C. Stachniss, and V. Stachniss, “Determination of Root Canal Curvatures before and after Canal Preparation (Part II): A Method based on Numeric Calculus,” Aust Endod J, vol. 32, p. 16–25, 2006.
    [BibTeX] [PDF]
    [none]
    @Article{sonntag2006,
    title = {Determination of Root Canal Curvatures before and after Canal Preparation (Part II): A Method based on Numeric Calculus},
    author = {Sonntag, D. and Stachniss-Carp, S. and Stachniss, C. and Stachniss, V.},
    journal = {Australian Endodontic Journal},
    year = {2006},
    pages = {16--25},
    volume = {32},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/sonntag06endod.pdf},
    }

  • C. Stachniss, “Exploration and Mapping with Mobile Robots,” PhD Thesis, 2006.
    [BibTeX] [PDF]
    [none]
    @phdthesis{stachniss2006a,
      author    = {Stachniss, C.},
      title     = {Exploration and Mapping with Mobile Robots},
      school    = {University of Freiburg, Department of Computer Science},
      year      = {2006},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss06phd.pdf},
    }

  • C. Stachniss, O. Martínez-Mozos, and W. Burgard, “Speeding-Up Multi-Robot Exploration by Considering Semantic Place Information,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Orlando, FL, USA, 2006, p. 1692–1697.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2006,
      author    = {Stachniss, C. and Mart\'{i}nez-Mozos, O. and Burgard, W.},
      title     = {Speeding-Up Multi-Robot Exploration by Considering Semantic Place Information},
      booktitle = icra,
      year      = {2006},
      address   = {Orlando, FL, USA},
      pages     = {1692--1697},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss06icra.pdf},
    }

  • J. Thielmann, “Entwurf und Evaluierung eines Verfahrens zur Detektion wiederholter Bildstrukturen,” Diplomarbeit Master Thesis, 2006.
    [BibTeX]

    Wiederholte Strukturen sind charakteristisch für künstliche Objekte, verursachen jedoch gleichzeitig für Zuordnungsverfahren eine sehr hohe algorithmische Komplexität, weshalb Verfahren zur Identifikation wiederholter Strukturen von besonderem Interesse sind. In der Arbeit soll das Verfahren von Schaffalitzky und Zisserman (2000) auf seine Eignung für die Detektion wiederholter Strukturen in Bildern von Gebäuden untersucht und bewertet werden.

    @MastersThesis{thielmann2006entwurf,
    title = {Entwurf und Evaluierung eines Verfahrens zur Detektion wiederholter Bildstrukturen},
    author = {Thielmann, Jan},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2006},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Martin Drauschke},
    type = {Diplomarbeit},
    abstract = {Wiederholte Strukturen sind charakteristisch f\"ur k\"unstliche Objekte, verursachen jedoch gleichzeitig f\"ur Zuordnungsverfahren eine sehr hohe algorithmische Komplexit\"at, weshalb Verfahren zur Identifikation wiederholter Strukturen von besonderem Interesse sind. In der Arbeit soll das verfahren von Schaffalitzky und Zisserman (2000) auf seine Eignung f\"ur die Detektion wiederholter Strukturen in Bildern von Geb\"auden untersucht und bewertet werden.},
    city = {Bonn},
    }

  • B. Waske and S. Schiefer, “Classifying segmented multitemporal SAR data from agricultural areas using support vector machines,” in 2nd Workshop of the EARSeL Special Interest Group on Land Use and Land Cover, 2006.
    [BibTeX] [PDF]

    In the presented study the performance of support vector machines (SVM) for classifying segmented multi-temporal SAR data is investigated. Results show that multi-temporal SAR data from an area dominated by agriculture can be successfully classified using SVM. Classification accuracy (78.2%) and degree of differentiation between land cover types is similar or better than results achieved with a decision tree classifier. A positive influence of image segmentation on classification results can be reported which varies with object size. A comparison of classification results derived on different aggregation levels shows, that a medium segment size should be preferred. It is better to work with segments that are smaller than the natural features of interest and segments that are greater than natural features should be avoided.

    @InProceedings{waske2006classifying,
    title = {Classifying segmented multitemporal SAR data from agricultural areas using support vector machines},
    author = {Waske, Bj\"orn and Schiefer, Sebastian},
    booktitle = {2nd Workshop of the EARSeL Special Interest Group on Land Use and Land Cover},
    year = {2006},
    abstract = {In the presented study the performance of support vector machines (SVM) for classifying segmented multi-temporal SAR data is investigated. Results show that multi-temporal SAR data from an area dominated by agriculture can be successfully classified using SVM. Classification accuracy (78.2\%) and degree of differentiation between land cover types is similar or better than results achieved with a decision tree classifier. A positive influence of image segmentation on classification results can be reported which varies with object size. A comparison of classification results derived on different aggregation levels shows, that a medium segment size should be preferred. It is better to work with segments that are smaller than the natural features of interest and segments that are greater than natural features should be avoided.},
    owner = {waske},
    timestamp = {2012.09.05},
    url = {https://www.ipb.uni-bonn.de/pdfs/Waske2006Classifying.pdf},
    }

  • B. Waske, S. Schiefer, and M. Braun, “Random Feature Selection for Decision Tree Classification of Multi-temporal SAR Data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2006. doi:10.1109/IGARSS.2006.48
    [BibTeX]
    [none]
    @inproceedings{waske2006random,
      author    = {Waske, Bj\"orn and Schiefer, S. and Braun, M.},
      title     = {Random Feature Selection for Decision Tree Classification of Multi-temporal SAR Data},
      booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
      year      = {2006},
      doi       = {10.1109/IGARSS.2006.48},
      keywords  = {decision tree classification;multiple classifiers size;multitemporal SAR images;random feature selection;supervised land cover classifications;visual inspection;decision trees;feature extraction;geophysics computing;image classification;synthetic aperture radar;},
      abstract  = {[none]},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • S. Wenzel, “Detektion wiederholter und symmetrischer Strukturen von Objekten in Bildern,” Diplomarbeit Master Thesis, 2006.
    [BibTeX] [PDF]

    Sich wiederholende bzw. symmetrische Strukturen sind Hinweise auf künstliche Objekte, führen aber auch zu Schwierigkeiten bei klassischen Bildzuordnungsverfahren. Die Suche und Gruppierung zusammengehöriger Features kann daher zur Identifikation künstlicher Objekte oder zur Verbesserung von Zuordnungsverfahren dienen. Darüber hinaus kann man aus einem Bild eines im Raum symmetrischen Objekts auf die 3D-Struktur dieses Objekts schließen. Die Diplomarbeit soll das von Loy und Eklundh auf der ECCV 2006 vorgestellte Verfahren zur Detektion symmetrischer und wiederholter Bildbereiche implementieren und hinsichtlich seiner Verwendbarkeit für photogrammetrische Gebäudeaufnahmen überprüfen. Insbesondere geht es um die Detektierbarkeit regelmäßiger Fassadenstrukturen in Abhängigkeit von ihrer Komplexität. Darüber hinaus ist zu klären, wie mehrfache Symmetrien identifiziert und ggf. für die 3D-Rekonstruktion des regelmäßigen Teils der Fassadenstruktur genutzt werden können.

    @MastersThesis{wenzel2006detektion,
    title = {Detektion wiederholter und symmetrischer Strukturen von Objekten in Bildern},
    author = {Wenzel, Susanne},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2006},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Martin Drauschke},
    type = {Diplomarbeit},
    abstract = {Sich wiederholende bzw. symmetrische Strukturen sind Hinweise auf k\"unstliche Objekte, f\"uhren aber auch zu Schwierigkeiten bei klassischen Bildzuordnungsverfahren. Die Suche und Gruppierung zusammengeh\"origer Features kann daher zur Identifikation k\"unstlicher Objekte oder zur Verbesserung von Zuordnungsverfahren dienen. Dar\"uber hinaus kann man aus einem Bild eines im Raum symmetrischen Objekts auf die 3D-Struktur dieses Objekts schlie{\ss}en. Die Diplomarbeit soll das von Loy und Eklundh auf der ECCV 2006 vorgestellte Verfahren zur Detektion symmetrischer und wiederholter Bildbereiche implementieren und hinsichtlich seiner Verwendbarkeit f\"ur photogrammetrische Geb\"audeaufnahmen \"uberpr\"ufen. Insbesondere geht es um die Detektierbarkeit regelm\"a{\ss}iger Fassadenstrukturen in Abh\"angigkeit von ihrer Komplexit\"at. Dar\"uber hinaus ist zu kl\"aren, wie mehrfache Symmetrien identifiziert und ggf. f\"ur die 3D-Rekonstruktion des regelm\"a{\ss}igen Teils der Fassadenstruktur genutzt werden k\"onnen.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Wenzel2006Detektion.pdf},
    }

  • K. Wolff, “Zur Approximation allgemeiner optischer Abbildungsmodelle und deren Anwendung auf eine geometrisch basierte Mehrbildzuordnung am Beispiel einer Mehrmedienabbildung,” PhD Thesis, 2006.
    [BibTeX]

    \textbf{Summary} Non perspective mappings include geometric elements, which have an direct influence on their geometric characteristics but which are more or less unknown in the field of photogrammetry. An example for such an element is the type of a viewing point, which can be a single point, a line or a surface (non single points). Another example is the way of mapping rays. Both influence the image distortions depending on the position of the image point in image space or the object point in object space. This work basically deals with the geometry of viewing points, the resulting image distortion and their relevance for a three dimensional photogrammetric reconstruction. The work focuses the following four points: Introduction of a new representation and taxonomy for optical imaging systems. One feature of optical mappings is the type of image distortion, which is important for the complexity of a photogrammetric process. It may either base on the position of the image point in image space or the object point in object space, which is much more complex. The image distortion and its modeling are being directly influenced by a single point or non single point viewpoint. In this context it is important that an imaging system may have more than one viewing point. A corresponding analysis of imaging systems is realised in the field of photogrammetry only under condition of some of these aspects. Here, a global analysis is given, which takes all these mentioned aspects into account, and a new resulting representation and taxonomy for optical imaging systems is introduced. Development of a general, efficient approximation method for imaging systems. Non perspective mappings, which have object space based image distortions, might have very complex and specialised mapping models. This is motivation for the development of an efficient and general approximation method for complex, object space based distorted mappings by a simplified model. 
By a definition of special requirements for the realisation, a significant influence of the approximation error on the quality of the final results can be prevented. A priori quality analyses of a 3d reconstruction support this assumption. Development of a geometrically based matching algorithm for multiple views. The approximation method is used for a new geometry based matching algorithm for 3D reconstruction, which will also be presented here. Different tests using synthetic and real data analyse and evaluate the methodology of the approximation, of the image matching and the 3D object reconstruction for multiple views and show the efficiency of both methods. Quality tests by a photogrammetric reconstruction of a fluvial sediment surface with multi media geometry. As an application example of the methods, a fluvial sediment surface is observed through the three optical media air, Perspex and water and is reconstructed in 3D. This application is also a test of the use of photogrammetric methods for preparing input data for a synthetic analysis of the sedimentation process. The results indicate that photogrammetric methods are applicable for this task. \textbf{Zusammenfassung} Nicht perspektivische Abbildungen besitzen geometrische Elemente, die auf ihre geometrischen Eigenschaften einen direkten Einfluss haben, im Bereich der Photogrammetrie aber kaum bekannt sind. Ein solches Element ist zum Beispiel die Art eines Projektionszentrums, das punkt-, linien- oder flächenförmig sein kann und der Verlauf der Abbildungsstrahlen, die beide die Abhängigkeit der Verzeichnung von der Position des Bildpunktes im Bildraum oder des zugehörigen Objektpunktes im Objektraum beeinflussen. Diese Arbeit behandelt grundlegend die Geometrie von Projektionszentren, den daraus resultierenden Bildverzeichnungen und ihre Bedeutung für eine dreidimensionale photogrammetrische Auswertung. 
Zusammenfassend umfasst diese Arbeit die folgenden vier Schwerpunkte: Einführung einer neuen Repräsentation und Taxonomie optischer Abbildungssysteme. Ein für die Komplexität einer photogrammetrischen Auswertung entscheidendes Merkmal optischer Abbildungen ist die Art ihrer Bildfehler, die bildraumbasiert oder objektraumbasiert sein können. Die Bildfehler und ihre Modellierung werden direkt durch das punktförmige oder nicht punktförmige Projektionszentrum des abbildenden Strahlenbündels beeinflusst. Dabei kann ein Abbildungssystem streng genommen mehr als ein Projektionszentrum besitzen. Eine entsprechende Analyse optischer Systeme ist im Bereich der Photogrammetrie bisher nur unter Berücksichtigung von einzelnen dieser Aspekte durchgeführt worden und wird hier zusammen mit einer resultierenden neuen Repräsentation und Taxonomie umfassender dargestellt. Vorstellung eines allgemeinen, effizienten Approximationsmodells optischer Abbildungssysteme. Nicht perspektivische Abbildungen, die eine objektraumbasierte Verzeichnung besitzen, können sehr komplexe und spezialisierte Abbildungsmodelle besitzen. Dies ist Motivation für die Entwicklung eines effizienten und allgemein gültigen Approximationsmodells komplexer, objektraumbasiert verzeichneter Abbildungen durch ein einfaches Modell. Durch bestimmte Anforderungen an den Einsatz des Modells wird ein signifikanter Einfluss auf die Endergebnisse verhindert. Apriori Qualitätsuntersuchungen für eine dreidimensionale Objektrekonstruktion bestätigen dies. Entwicklung eines geometrisch basierten Zuordnungsalgorithmus im Mehrbildverband. Das Approximationsmodell wird innerhalb eines ebenfalls hier vorgestellten, geometrisch basierten Zuordnungsverfahrens innerhalb einer Objektrekonstruktion angewendet. Verschiedene Tests mit synthetischen und realen Daten analysieren und bewerten die Methodik der Approximation, der Bildzuordnung und Objektrekonstruktion im Mehrbildverband und zeigen einen effektiven Einsatz der beiden Verfahren. 
Qualitätsanalysen mittels photogrammetrischer Rekonstruktion von fluvialen Sedimentoberflächen unter optischen Mehrmedienbedingungen. Als Anwendungsbeispiel wird eine fluviale Sedimentoberfläche durch die Medien Luft, Plexiglas und Wasser hindurch beobachtet und rekonstruiert. Mit diesem Beispiel wird gleichzeitig die Einsatzmöglichkeiten photogrammetrischer Methoden für die Erzeugung von Eingangsdaten für eine Analyse eines dynamischen, fluvialen Sedimentationsprozesses geprüft. Die Ergebnisse zeigen, dass photogrammetrische Methoden für die Lösung dieser Aufgabenstellung grundsätzlich anwendbar sind.

    @PhDThesis{wolff2006zur,
    title = {Zur Approximation allgemeiner optischer Abbildungsmodelle und deren Anwendung auf eine geometrisch basierte Mehrbildzuordnung am Beispiel einer Mehrmedienabbildung},
    author = {Wolff, Kirsten},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2006},
    abstract = {Summary: Non perspective mappings include geometric elements, which have an direct influence on their geometric characteristics but which are more or less unknown in the field of photogrammetry. An example for such an element is the type of a viewing point, which can be a single point, a line or a surface (non single points). Another example is the way of mapping rays. Both influence the image distortions depending on the position of the image point in image space or the object point in object space. This work basically deals with the geometry of viewing points, the resulting image distortion and their relevance for a three dimensional photogrammetric reconstruction. The work focuses the following four points: Introduction of a new representation and taxonomy for optical imaging systems. One feature of optical mappings is the type of image distortion, which is important for the complexity of a photogrammetric process. It may either base on the position of the image point in image space or the object point in object space, which is much more complex. The image distortion and its modeling are being directly influenced by a single point or non single point viewpoint. In this context it is important that an imaging system may have more than one viewing point. A corresponding analysis of imaging systems is realised in the field of photogrammetry only under condition of some of these aspects. Here, a global analysis is given, which takes all these mentioned aspects into account, and a new resulting representation and taxonomy for optical imaging systems is introduced. Development of a general, efficient approximation method for imaging systems. Non perspective mappings, which have object space based image distortions, might have very complex and specialised mapping models. This is motivation for the development of an efficient and general approximation method for complex, object space based distorted mappings by a simplified model. 
By a definition of special requirements for the realisation, a significant influence of the approximation error on the quality of the final results can be prevented. A priori quality analyses of a 3d reconstruction support this assumption. Development of a geometrically based matching algorithm for multiple views. The approximation method is used for a new geometry based matching algorithm for 3D reconstruction, which will also be presented here. Different tests using synthetic and real data analyse and evaluate the methodology of the approximation, of the image matching and the 3D object reconstruction for multiple views and show the efficiency of both methods. Quality tests by a photogrammetric reconstruction of a fluvial sediment surface with multi media geometry. As an application example of the methods, a fluvial sediment surface is observed through the three optical media air, Perspex and water and is reconstructed in 3D. This application is also a test of the use of photogrammetric methods
    for preparing input data for a synthetic analysis of the sedimentation process. The results indicate that photogrammetric methods are applicable for this task. Zusammenfassung: Nicht perspektivische Abbildungen besitzen geometrische Elemente, die auf ihre geometrischen Eigenschaften einen direkten Einfluss haben, im Bereich der Photogrammetrie aber kaum bekannt sind. Ein solches Element ist zum Beispiel die Art eines Projektionszentrums, das punkt-, linien- oder fl\"achenf\"ormig sein kann und der Verlauf der Abbildungsstrahlen, die beide die Abh\"angigkeit der Verzeichnung von der Position des Bildpunktes im Bildraum oder des zugeh\"origen Objektpunktes im Objektraum beeinflussen. Diese Arbeit behandelt grundlegend die Geometrie von Projektionszentren, den daraus resultierenden Bildverzeichnungen und ihre Bedeutung f\"ur eine dreidimensionale photogrammetrische Auswertung. Zusammenfassend umfasst diese Arbeit die folgenden vier Schwerpunkte: Einf\"uhrung einer neuen Repr\"asentation und Taxonomie optischer Abbildungssysteme. Ein f\"ur die Komplexit\"at einer photogrammetrischen Auswertung entscheidendes Merkmal optischer Abbildungen ist die Art ihrer Bildfehler, die bildraumbasiert oder objektraumbasiert sein k\"onnen. Die Bildfehler und ihre Modellierung werden direkt durch das punktf\"ormige oder nicht punktf\"ormige Projektionszentrum des abbildenden Strahlenb\"undels beeinflusst. Dabei kann ein Abbildungssystem streng genommen mehr als ein Projektionszentrum besitzen. Eine entsprechende Analyse optischer Systeme ist im Bereich der Photogrammetrie bisher nur unter Ber\"ucksichtigung von einzelnen dieser Aspekte durchgef\"uhrt worden und wird hier zusammen mit einer resultierenden neuen Repr\"asentation und Taxonomie umfassender dargestellt. Vorstellung eines allgemeinen, effizienten Approximationsmodells optischer Abbildungssysteme. 
Nicht perspektivische Abbildungen, die eine objektraumbasierte Verzeichnung besitzen, k\"onnen sehr komplexe und spezialisierte Abbildungsmodelle besitzen. Dies ist Motivation f\"ur die Entwicklung eines effizienten und allgemein g\"ultigen Approximationsmodells komplexer, objektraumbasiert verzeichneter Abbildungen durch ein einfaches Modell. Durch bestimmte Anforderungen an den Einsatz des Modells wird ein signifikanter Einfluss auf die Endergebnisse verhindert. Apriori Qualit\"atsuntersuchungen f\"ur eine dreidimensionale Objektrekonstruktion best\"atigen dies. Entwicklung eines geometrisch basierten Zuordnungsalgorithmus im Mehrbildverband. Das Approximationsmodell wird innerhalb eines ebenfalls hier vorgestellten, geometrisch basierten Zuordnungsverfahrens innerhalb einer Objektrekonstruktion angewendet. Verschiedene Tests mit synthetischen und realen Daten analysieren und bewerten die Methodik der Approximation, der Bildzuordnung und Objektrekonstruktion im Mehrbildverband und zeigen einen effektiven Einsatz der beiden Verfahren. Qualit\"atsanalysen mittels photogrammetrischer
    Rekonstruktion von fluvialen Sedimentoberfl\"achen unter optischen Mehrmedienbedingungen. Als Anwendungsbeispiel wird eine fluviale Sedimentoberfl\"ache durch die Medien Luft, Plexiglas und Wasser hindurch beobachtet und rekonstruiert. Mit diesem Beispiel wird gleichzeitig die Einsatzm\"oglichkeiten photogrammetrischer Methoden f\"ur die Erzeugung von Eingangsdaten f\"ur eine Analyse eines dynamischen, fluvialen Sedimentationsprozesses gepr\"uft. Die Ergebnisse zeigen, dass photogrammetrische Methoden f\"ur die L\"osung dieser Aufgabenstellung grunds\"atzlich anwendbar sind.},
    }

2005

  • S. Abraham and W. Förstner, “Fish-eye-stereo calibration and epipolar rectification,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 59, iss. 5, p. 278–288, 2005.
    [BibTeX] [PDF]

    The paper describes calibration and epipolar rectification for stereo with fish-eye optics. While stereo processing of classical cameras is state of the art for many applications, stereo with fish-eye cameras have been much less discussed in literature. This paper discusses the geometric calibration and the epipolar rectification as pre-requisite for stereo processing with fish-eyes. First, it surveys mathematical models to describe the projection. Then the paper presents a method of generating epipolar images which are suitable for stereo-processing with a field of view larger than 180 degrees in vertical and horizontal viewing directions. One example with 3D-point measuring from real fish-eye images demonstrates the feasibility of the calibration and rectification procedure. *Keywords: *fish-eye camera calibration; fish-eye stereo; epipolar rectification

    @Article{steffen2005fish,
    title = {Fish-eye-stereo calibration and epipolar rectification},
    author = {Abraham, Steffen and F{\"o}rstner, Wolfgang},
    journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
    year = {2005},
    number = {5},
    pages = {278--288},
    volume = {59},
    abstract = {The paper describes calibration and epipolar rectification for stereo with fish-eye optics. While stereo processing of classical cameras is state of the art for many applications, stereo with fish-eye cameras have been much less discussed in literature. This paper discusses the geometric calibration and the epipolar rectification as pre-requisite for stereo processing with fish-eyes. First, it surveys mathematical models to describe the projection. Then the paper presents a method of generating epipolar images which are suitable for stereo-processing with a field of view larger than 180 degrees in vertical and horizontal viewing directions. One example with 3D-point measuring from real fish-eye images demonstrates the feasibility of the calibration and rectification procedure. *Keywords: *fish-eye camera calibration; fish-eye stereo; epipolar rectification},
    url = {https://www.ipb.uni-bonn.de/pdfs/Steffen2005Fish.pdf},
    }

  • C. Beder, “Agglomerative Grouping of Observations by Bounding Entropy Variation,” in Pattern Recognition, Vienna, Austria, 2005, pp. 101-108. doi:10.1007/11550518_13
    [BibTeX] [PDF]

    An information theoretic framework for grouping observations is proposed. The entropy change incurred by new observations is analyzed using the Kalman filter update equations. It is found, that the entropy variation is caused by a positive similarity term and a negative proximity term. Bounding the similarity term in the spirit of the minimum description length principle and the proximity term in the spirit of maximum entropy inference a robust and efficient grouping procedure is devised. Some of its properties are demonstrated for the exemplary task of edgel grouping.

    @InProceedings{beder2005agglomerative,
    title = {Agglomerative Grouping of Observations by Bounding Entropy Variation},
    author = {Beder, Christian},
    booktitle = {Pattern Recognition},
    year = {2005},
    address = {Vienna, Austria},
    editor = {Kropatsch, Walter and Sablatnig, Robert and Hanbury, Allan},
    volume = {3663},
    organization = {DAGM},
    pages = {101--108},
    publisher = {Springer},
    series = {LNCS},
    abstract = {An information theoretic framework for grouping observations is proposed. The entropy change incurred by new observations is analyzed using the Kalman filter update equations. It is found, that the entropy variation is caused by a positive similarity term and a negative proximity term. Bounding the similarity term in the spirit of the minimum description length principle and the proximity term in the spirit of maximum entropy inference a robust and efficient grouping procedure is devised. Some of its properties are demonstrated for the exemplary task of edgel grouping.},
    doi = {10.1007/11550518_13},
    file = {beder05.agglomerative.pdf:http\://www.ipb.uni-bonn.de/papers/2005/beder05.agglomerative.pdf:PDF},
    url = {https://www.ipb.uni-bonn.de/pdfs/Beder2005Agglomerative.pdf},
    }

  • W. Burgard, M. Moors, C. Stachniss, and F. Schneider, “Coordinated Multi-Robot Exploration,” IEEE Transactions on Robotics, vol. 21, iss. 3, p. 376–378, 2005.
    [BibTeX] [PDF]
    [none]
    @article{burgard2005a,
      author    = {W. Burgard and M. Moors and C. Stachniss and F. Schneider},
      title     = {Coordinated Multi-Robot Exploration},
      journal   = ieeetransrob,
      year      = {2005},
      volume    = {21},
      number    = {3},
      pages     = {376--378},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/burgard05tro.pdf},
    }

  • W. Burgard, C. Stachniss, and G. Grisetti, “Information Gain-based Exploration Using Rao-Blackwellized Particle Filters,” in Proc. of the Learning Workshop (Snowbird), Snowbird, UT, USA, 2005.
    [BibTeX] [PDF]
    [none]
    @inproceedings{burgard2005,
      author    = {Burgard, W. and Stachniss, C. and Grisetti, G.},
      title     = {Information Gain-based Exploration Using Rao-Blackwellized Particle Filters},
      booktitle = {Proc. of the Learning Workshop (Snowbird)},
      year      = {2005},
      address   = {Snowbird, UT, USA},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/burgard05snowbird.pdf},
    }

  • G. Grisetti, C. Stachniss, and W. Burgard, “Improving Grid-based SLAM with Rao-Blackwellized Particle Filters by Adaptive Proposals and Selective Resampling,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Barcelona, Spain, 2005, p. 2443–2448.
    [BibTeX] [PDF]
    [none]
    @inproceedings{grisetti2005,
      author    = {Grisetti, G. and Stachniss, C. and Burgard, W.},
      title     = {Improving Grid-based {SLAM} with Rao-Blackwellized Particle Filters by Adaptive Proposals and Selective Resampling},
      booktitle = icra,
      year      = {2005},
      address   = {Barcelona, Spain},
      pages     = {2443--2448},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/grisetti05icra.pdf},
    }

  • V. Heinzel, B. Waske, M. Braun, and G. Menz, “The potential of multitemporal and multisensoral remote sensing data for the extraction of biophysical parameters of wheat,” in SPIE Remote Sensing Europe, 2005. doi:10.1117/12.627336
    [BibTeX]

    Satellite based monitoring of agricultural activities requires a very high temporal resolution, due to the highly dynamic processes on viewed surfaces. The solitary use of optical data is restricted by its dependency on weather conditions. Hence, the synergetic use of SAR and optical data has a very high potential for agricultural applications such as biomass monitoring or yield estimation. Synthetic Aperture Radar data of the ERS-2 offer the chance of bi-weekly data acquisitions. Additionally, Landsat-5 Thematic Mapper (TM) and high-resolution optical data from the Quickbird satellite shall help to verify the derived information. The Advanced Synthetic Aperture Radar (ASAR) of the European environmental satellite (ENVISAT) enables several acquisitions per week, due to the availability of different incidence angles. Moreover, the ASAR sensor offers the possibility to acquire alternating polarization data, providing HH/HV and VV/VH images. This will help to fill time gaps and bring an additional information gain in further studies. In the present study the temporal development of biomass from two winter wheat fields is modeled based on multitemporal and multisensoral satellite data. For this purpose comprehensive ground truth information (e.g. biomass, LAI, vegetation height) was recorded in weekly intervals for the vegetation period of 2005. A positive relationship between the normalized difference vegetation index (NDVI) of optical data and biomass could be shown. The backscatter of SAR data is negatively related to the biomass. Regression coefficients of models for biomass based on satellite data and the collected biomass vary between r2=0.49 for ERS-2 and r2=0.86 for Quickbird. The study is a first step in the synergetic use of optical and SAR data for biomass modeling and yield estimation over agricultural sites in Central Europe.

    @inproceedings{heinzel2005potential,
      author    = {Heinzel, Vanessa and Waske, Bj\"orn and Braun, Matthias and Menz, Gunter},
      title     = {The potential of multitemporal and multisensoral remote sensing data for the extraction of biophysical parameters of wheat},
      booktitle = {SPIE Remote Sensing Europe},
      year      = {2005},
      doi       = {10.1117/12.627336},
      abstract  = {Satellite based monitoring of agricultural activities requires a very high temporal resolution, due to the highly dynamic processes on viewed surfaces. The solitary use of optical data is restricted by its dependency on weather conditions. Hence, the synergetic use of SAR and optical data has a very high potential for agricultural applications such as biomass monitoring or yield estimation. Synthetic Aperture Radar data of the ERS-2 offer the chance of bi-weekly data acquisitions. Additionally, Landsat-5 Thematic Mapper (TM) and high-resolution optical data from the Quickbird satellite shall help to verify the derived information. The Advanced Synthetic Aperture Radar (ASAR) of the European environmental satellite (ENVISAT) enables several acquisitions per week, due to the availability of different incidence angles. Moreover, the ASAR sensor offers the possibility to acquire alternating polarization data, providing HH/HV and VV/VH images. This will help to fill time gaps and bring an additional information gain in further studies. In the present study the temporal development of biomass from two winter wheat fields is modeled based on multitemporal and multisensoral satellite data. For this purpose comprehensive ground truth information (e.g. biomass, LAI, vegetation height) was recorded in weekly intervals for the vegetation period of 2005. A positive relationship between the normalized difference vegetation index (NDVI) of optical data and biomass could be shown. The backscatter of SAR data is negatively related to the biomass. Regression coefficients of models for biomass based on satellite data and the collected biomass vary between r2=0.49 for ERS-2 and r2=0.86 for Quickbird. The study is a first step in the synergetic use of optical and SAR data for biomass modeling and yield estimation over agricultural sites in Central Europe.},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • S. Krömeke, “Extraktion affininvarianter Bildmerkmale,” Diplomarbeit Master Thesis, 2005.
    [BibTeX]
    [none]
    @mastersthesis{kromeke2005extraktion,
      author   = {Kr\"omeke, Sven},
      title    = {Extraktion affininvarianter Bildmerkmale},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {2005},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, PD Dr. Volker Steinhage},
      abstract = {[none]},
      city     = {Bonn},
    }

  • T. Läbe and W. Förstner, “Erfahrungen mit einem neuen vollautomatischen Verfahren zur Orientierung digitaler Bilder,” in Proc. of DGPF Conf., Rostock, Germany, 2005.
    [BibTeX] [PDF]

    Der Aufsatz präsentiert ein neues vollautomatisches Verfahren zur relativen Orientierung mehrerer digitaler Bilder kalibrierter Kameras. Es nutzt die in den letzten Jahren neu entwickelten Algorithmen im Bereich der Merkmalsextraktion und der Bildgeometrie und erfordert weder das Anbringen von künstlichen Zielmarken noch die Angabe von Näherungswerten. Es basiert auf automatisch extrahierten Punkten, die mit dem von D. Lowe (2004) vorgeschlagenen Verfahren zur Extraktion skaleninvarianter Bildmerkmale berechnet werden. Diese ermöglichen eine Punktzuordnung auch bei stark konvergenten Aufnahmen. Für die Bestimmung von Näherungswerten der abschließenden Bündelausgleichung wird bei der relativen Orientierung der Bildpaare das direkte Lösungsverfahren von D. Nister (2004) verwendet. Der Aufsatz diskutiert die praktischen Erfahrungen mit den verwendeten Algorithmen anhand von Beispieldatensätzen sowohl von Innenraum- als auch von Aussnaufnahmen.

    @inproceedings{labe2005erfahrungen,
      author     = {L\"abe, Thomas and F\"orstner, Wolfgang},
      title      = {Erfahrungen mit einem neuen vollautomatischen Verfahren zur Orientierung digitaler Bilder},
      booktitle  = {Proc. of DGPF Conf.},
      year       = {2005},
      address    = {Rostock, Germany},
      abstract   = {Der Aufsatz pr\"asentiert ein neues vollautomatisches Verfahren zur relativen Orientierung mehrerer digitaler Bilder kalibrierter Kameras. Es nutzt die in den letzten Jahren neu entwickelten Algorithmen im Bereich der Merkmalsextraktion und der Bildgeometrie und erfordert weder das Anbringen von k\"unstlichen Zielmarken noch die Angabe von N\"aherungswerten. Es basiert auf automatisch extrahierten Punkten, die mit dem von D. Lowe (2004) vorgeschlagenen Verfahren zur Extraktion skaleninvarianter Bildmerkmale berechnet werden. Diese erm\"oglichen eine Punktzuordnung auch bei stark konvergenten Aufnahmen. F\"ur die Bestimmung von N\"aherungswerten der abschlie{\ss}enden B\"undelausgleichung wird bei der relativen Orientierung der Bildpaare das direkte L\"osungsverfahren von D. Nister (2004) verwendet. Der Aufsatz diskutiert die praktischen Erfahrungen mit den verwendeten Algorithmen anhand von Beispieldatens\"atzen sowohl von Innenraum- als auch von Aussnaufnahmen.},
      city       = {Bonn},
      proceeding = {Proc. of DGPF Conf.},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Labe2005Erfahrungen.pdf},
    }

  • O. Martínez-Mozos, C. Stachniss, and W. Burgard, “Supervised Learning of Places from Range Data using Adaboost,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Barcelona, Spain, 2005, p. 1742–1747.
    [BibTeX] [PDF]
    [none]
    @inproceedings{martinez-mozos2005,
      author    = {Mart\'{i}nez-Mozos, O. and Stachniss, C. and Burgard, W.},
      title     = {Supervised Learning of Places from Range Data using Adaboost},
      booktitle = icra,
      year      = {2005},
      address   = {Barcelona, Spain},
      pages     = {1742--1747},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/martinez05icra.pdf},
    }

  • J. Meidow and H. Schuster, “Voxel-based Quality Evaluation of Photogrammetric Building Acquisitions.” 2005.
    [BibTeX] [PDF]

    Automatic quality evaluation of photogrammetric building acquisitions is important to realize deficiencies of acquisition approaches, tocompare different acquisitions approaches and to check the keeping of contractual specifications. For the decision-makers a procedure will be suggested taking a few, good interpretable quality measures into account. Therefore, useful quality measures have to be identifiedby the formulation of criteria. These quantities can be derived from the comparison of a test data set and a reference data set capturing the same scene. The acquired topology is usually uncertain as for instance two adjacent buildings may be acquired as one building ortwo buildings. Thus a screening of the registered area is suggested to compute the quantities. The approach is independent of the used acquisition method. For the application of large data sets the corresponding data structures will be explained. In experimental tests thebuildings registered by two commercial acquisition systems will be compared by the quality measures determined in 2D and 3D.

    @inproceedings{meidow2005voxel,
      author     = {Meidow, Jochen and Schuster, Hanns-Florian},
      title      = {Voxel-based Quality Evaluation of Photogrammetric Building Acquisitions},
      booktitle  = {Proc. of the CMRT05-Workshop},
      year       = {2005},
      abstract   = {Automatic quality evaluation of photogrammetric building acquisitions is important to realize deficiencies of acquisition approaches, tocompare different acquisitions approaches and to check the keeping of contractual specifications. For the decision-makers a procedure will be suggested taking a few, good interpretable quality measures into account. Therefore, useful quality measures have to be identifiedby the formulation of criteria. These quantities can be derived from the comparison of a test data set and a reference data set capturing the same scene. The acquired topology is usually uncertain as for instance two adjacent buildings may be acquired as one building ortwo buildings. Thus a screening of the registered area is suggested to compute the quantities. The approach is independent of the used acquisition method. For the application of large data sets the corresponding data structures will be explained. In experimental tests thebuildings registered by two commercial acquisition systems will be compared by the quality measures determined in 2D and 3D.},
      city       = {Bonn},
      proceeding = {Proc. of the CMRT05-Workshop},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Meidow2005Voxel.pdf},
    }

  • D. Meier, C. Stachniss, and W. Burgard, “Coordinating Multiple Robots During Exploration Under Communication With Limited Bandwidth,” in Proc. of the European Conf. on Mobile Robots (ECMR), Ancona, Italy, 2005, p. 26–31.
    [BibTeX] [PDF]
    [none]
    @inproceedings{meier2005,
      author    = {Meier, D. and Stachniss, C. and Burgard, W.},
      title     = {Coordinating Multiple Robots During Exploration Under Communication With Limited Bandwidth},
      booktitle = ecmr,
      year      = {2005},
      address   = {Ancona, Italy},
      pages     = {26--31},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/meier05ecmr.pdf},
    }

  • A. Rottmann, O. Martínez-Mozos, C. Stachniss, and W. Burgard, “Place Classification of Indoor Environments with Mobile Robots using Boosting,” in Proc. of the National Conf. on Artificial Intelligence (AAAI), Pittsburgh, PA, USA, 2005, p. 1306–1311.
    [BibTeX] [PDF]
    [none]
    @inproceedings{rottmann2005,
      author    = {Rottmann, A. and Mart\'{i}nez-Mozos, O. and Stachniss, C. and Burgard, W.},
      title     = {Place Classification of Indoor Environments with Mobile Robots using Boosting},
      booktitle = aaai,
      year      = {2005},
      address   = {Pittsburgh, PA, USA},
      pages     = {1306--1311},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/rottmann05aaai.pdf},
    }

  • H. Schuster, “Detection of man-made-objects based on spatial aggregations,” Department of Photogrammetry, University of Bonn, TR-IGG-P-2006-01, 2005.
    [BibTeX] [PDF]

    This paper presents a method for detecting complex man-made-objects in images. The detection model is a bayesian net that aggregates cliques of image regions which may cover a complex object. Observable attributes of the regions are derived from a rich symbolic image description containing points, lines and regions as basic features including their relations. The model captures the dependency of the region aggregates on the features and their relations with respect to observability due to occlusions and to perspective deformations. Cliques are classified using MAP estimation. Up to now, the model captures cliques with one, two and three regions which is sufficient for detecting polyhedral objects. The model allows to detect and locate multiple appearances of object classes. The joint distribution of the Bayesian net is determined in a supervised learning step based on images with annotated regions. The method is realized and demonstrated for the detection of building roofs in aerial images.

    @techreport{schuster2005detection,
      author      = {Schuster, Hanns-Florian},
      title       = {Detection of man-made-objects based on spatial aggregations},
      institution = {Department of Photogrammetry, University of Bonn},
      year        = {2005},
      number      = {TR-IGG-P-2006-01},
      abstract    = {This paper presents a method for detecting complex man-made-objects in images. The detection model is a bayesian net that aggregates cliques of image regions which may cover a complex object. Observable attributes of the regions are derived from a rich symbolic image description containing points, lines and regions as basic features including their relations. The model captures the dependency of the region aggregates on the features and their relations with respect to observability due to occlusions and to perspective deformations. Cliques are classified using MAP estimation. Up to now, the model captures cliques with one, two and three regions which is sufficient for detecting polyhedral objects. The model allows to detect and locate multiple appearances of object classes. The joint distribution of the Bayesian net is determined in a supervised learning step based on images with annotated regions. The method is realized and demonstrated for the detection of building roofs in aerial images.},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Schuster2005Detection.pdf},
    }

  • C. Stachniss and W. Burgard, “Mobile Robot Mapping and Localization in Non-Static Environments,” in Proc. of the National Conf. on Artificial Intelligence (AAAI), Pittsburgh, PA, USA, 2005, p. 1324–1329.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2005,
      author    = {Stachniss, C. and Burgard, W.},
      title     = {Mobile Robot Mapping and Localization in Non-Static Environments},
      booktitle = aaai,
      year      = {2005},
      address   = {Pittsburgh, PA, USA},
      pages     = {1324--1329},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss05aaai.pdf},
    }

  • C. Stachniss, G. Grisetti, and W. Burgard, “Information Gain-based Exploration Using Rao-Blackwellized Particle Filters,” in Proc. of Robotics: Science and Systems (RSS), Cambridge, MA, USA, 2005, p. 65–72.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2005a,
      author    = {Stachniss, C. and Grisetti, G. and Burgard, W.},
      title     = {Information Gain-based Exploration Using Rao-Blackwellized Particle Filters},
      booktitle = rss,
      year      = {2005},
      address   = {Cambridge, MA, USA},
      pages     = {65--72},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss05rss.pdf},
    }

  • C. Stachniss, G. Grisetti, and W. Burgard, “Recovering Particle Diversity in a Rao-Blackwellized Particle Filter for SLAM after Actively Closing Loops,” in Proc. of the IEEE Intl. Conf. on Robotics & Automation (ICRA), Barcelona, Spain, 2005, p. 667–672.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2005d,
      author    = {Stachniss, C. and Grisetti, G. and Burgard, W.},
      title     = {Recovering Particle Diversity in a Rao-Blackwellized Particle Filter for {SLAM} after Actively Closing Loops},
      booktitle = icra,
      year      = {2005},
      address   = {Barcelona, Spain},
      pages     = {667--672},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss05icra.pdf},
    }

  • C. Stachniss, D. Hähnel, W. Burgard, and G. Grisetti, “On Actively Closing Loops in Grid-based FastSLAM,” Advanced Robotics, vol. 19, iss. 10, p. 1059–1080, 2005.
    [BibTeX] [PDF]
    [none]
    @article{stachniss2005c,
      author    = {Stachniss, C. and H\"{a}hnel, D. and Burgard, W. and Grisetti, G.},
      title     = {On Actively Closing Loops in Grid-based {FastSLAM}},
      journal   = advancedrobotics,
      year      = {2005},
      volume    = {19},
      number    = {10},
      pages     = {1059--1080},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss05ar.pdf},
    }

  • C. Stachniss, O. Martínez-Mozos, A. Rottmann, and W. Burgard, “Semantic Labeling of Places,” in Proc. of the Intl. Symposium of Robotics Research (ISRR), San Francisco, CA, USA, 2005.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2005b,
      author    = {Stachniss, C. and Mart\'{i}nez-Mozos, O. and Rottmann, A. and Burgard, W.},
      title     = {Semantic Labeling of Places},
      booktitle = isrr,
      year      = {2005},
      address   = {San Francisco, CA, USA},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss05isrr.pdf},
    }

  • P. Trahanias, W. Burgard, A. Argyros, D. Hähnel, H. Baltzakis, P. Pfaff, and C. Stachniss, “TOURBOT and WebFAIR: Web-Operated Mobile Robots for Tele-Presence in Populated Exhibitions,” IEEE Robotics & Automation Magazine, vol. 12, iss. 2, p. 77–89, 2005.
    [BibTeX] [PDF]
    [none]
    @article{trahanias2005,
      author    = {Trahanias, P. and Burgard, W. and Argyros, A. and H\"{a}hnel, D. and Baltzakis, H. and Pfaff, P. and Stachniss, C.},
      title     = {{TOURBOT} and {WebFAIR}: Web-Operated Mobile Robots for Tele-Presence in Populated Exhibitions},
      journal   = {IEEE Robotics \& Automation Magazine},
      year      = {2005},
      volume    = {12},
      number    = {2},
      pages     = {77--89},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://ieeexplore.ieee.org/iel5/100/31383/01458329.pdf?arnumber=1458329},
    }

  • B. Waske, V. Heinzel, M. Braun, and G. Menz, “Object-based speckle filtering using multisensoral remote sensing data,” in SPIE Remote Sensing Europe, 2005. doi:10.1117/12.626513
    [BibTeX]

    Speckle – appearing in SAR Images as random noise – hampers image processing techniques like segmentation and classification. Several algorithms have been developed to suppress the speckle effect. One disadvantage, even with optimized speckle reduction algorithms, is a blurring of the image. This effect, which appears especially along the edges of structures, is leading to further problems in subsequent image interpretation. To prevent a loss of information, the knowledge of structures in the image could be an advantage. Therefore the proposed methodology combines common filtering techniques with results from a segmentation of optical images for an object-based speckle filtering. The performance of the adapted algorithm is compared to those of common speckle filters. The accuracy assessment is based on statistical criteria and visual interpretation of the images. The results show that the efficiency of the speckle filter algorithm can be increased while a loss of information can be reduced using the boundary during the filtering process.

    @inproceedings{waske2005object,
      author    = {Waske, Bj\"orn and Heinzel, Vanessa and Braun, Matthias and Menz, Gunter},
      title     = {Object-based speckle filtering using multisensoral remote sensing data},
      booktitle = {SPIE Remote Sensing Europe},
      year      = {2005},
      doi       = {10.1117/12.626513},
      abstract  = {Speckle - appearing in SAR Images as random noise - hampers image processing techniques like segmentation and classification. Several algorithms have been developed to suppress the speckle effect. One disadvantage, even with optimized speckle reduction algorithms, is a blurring of the image. This effect, which appears especially along the edges of structures, is leading to further problems in subsequent image interpretation. To prevent a loss of information, the knowledge of structures in the image could be an advantage. Therefore the proposed methodology combines common filtering techniques with results from a segmentation of optical images for an object-based speckle filtering. The performance of the adapted algorithm is compared to those of common speckle filters. The accuracy assessment is based on statistical criteria and visual interpretation of the images. The results show that the efficiency of the speckle filter algorithm can be increased while a loss of information can be reduced using the boundary during the filtering process.},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • T. Wellen, “Shadow Removal from Aerial Views for Realistic Terrain Rendering,” Diplomarbeit Master Thesis, 2005.
    [BibTeX]
    [none]
    @mastersthesis{wellen2005shadow,
      author   = {Wellen, Thomas},
      title    = {Shadow Removal from Aerial Views for Realistic Terrain Rendering},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {2005},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Reinhard Klein},
      abstract = {[none]},
      city     = {Bonn},
    }

2004

  • C. Beder, “Fast Statistically Geometric Reasoning About Uncertain Line Segments in 2D- and 3D-Space,” in Proc. of the DAGM Symposium, Tübingen, 2004, p. 375–382.
    [BibTeX] [PDF]

    This work addresses the two major drawbacks of current statistical uncertain geometric reasoning approaches. In the first part a framework is presented, that allows to represent uncertain line segments in 2D- and 3D-space and perform statistical test with these practically very important types of entities. The second part addresses the issue of performance of geometric reasoning. A data structure is introduced, that allows the efficient processing of large amounts of statistical tests involving geometric entities. The running times of this approach are finally evaluated experimentally.

    @inproceedings{beder2004fast,
      author       = {Beder, Christian},
      title        = {Fast Statistically Geometric Reasoning About Uncertain Line Segments in 2D- and 3D-Space},
      booktitle    = {Proc. of the DAGM Symposium},
      year         = {2004},
      address      = {T\"ubingen},
      editor       = {C.E.Rasmussen and H.H.B\"ulthoff and B.Sch\"olkopf and M.A.Giese},
      number       = {3175},
      series       = {LNCS},
      publisher    = {Springer},
      organization = {DAGM},
      pages        = {375--382},
      abstract     = {This work addresses the two major drawbacks of current statistical uncertain geometric reasoning approaches. In the first part a framework is presented, that allows to represent uncertain line segments in 2D- and 3D-space and perform statistical test with these practically very important types of entities. The second part addresses the issue of performance of geometric reasoning. A data structure is introduced, that allows the efficient processing of large amounts of statistical tests involving geometric entities. The running times of this approach are finally evaluated experimentally.},
      file         = {beder04.fast.pdf:http\://www.ipb.uni-bonn.de/papers/2004/beder04.fast.pdf:PDF},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Beder2004Fast.pdf},
    }

  • C. Beder, “A Unified Framework for the Automatic Matching of Points and Lines in Multiple Oriented Images,” in Proc. 20th ISPRS Congress, Istanbul, Turkey, 2004, p. 1109–1113.
    [BibTeX] [PDF]

    The accurate reconstruction of the three-dimensional structure from multiple images is still a challenging problem, so that most current approaches are based on semi-automatic procedures. Therefore the introduction of accurate and reliable automation for this classical problem is one of the key goals of photogrammetric research. This work deals with the problem of matching points and lines across multiple views, in order to gain a highly accurate reconstruction of the depicted object in three-dimensional space. In order to achieve this goal, a novel framework is introduced, that draws a sharp boundary between feature extraction, feature matching based on geometric constraints and feature matching based on radiometric constraints. The isolation of this three parts allows direct control and therefore better understanding of the different kinds of influences on the results. Most image feature matching approaches heavily depend on the radiometric properties of the features and only incorporate geometry information to improve performance and stability. The extracted radiometric descriptors of the features often assume a local planar or smooth object, which is by definition neither present at object corners nor edges. Therefore it would be desirable to use only descriptors that are rigorously founded for the given object model. Unfortunately the task of feature matching based on radiometric properties becomes extremely difficult for this much weaker descriptors. Hence a key feature of the presented framework is the consistent and rigorous use of statistical properties of the extracted geometric entities in the matching process, allowing a unified algorithm for matching points and lines in multiple views using solely the geometric properties of the extracted features. The results are stabilized by the use of many images to compensate for the lack of radiometric information. 
Radiometric descriptors may be consistently included into the framework for stabilization as well. Results from the application of the presented framework to the task of fully automatic reconstruction of points and lines from multiple images are shown.

    @inproceedings{beder2004unified,
      author       = {Beder, Christian},
      title        = {A Unified Framework for the Automatic Matching of Points and Lines in Multiple Oriented Images},
      booktitle    = {Proc. 20th ISPRS Congress},
      year         = {2004},
      address      = {Istanbul, Turkey},
      organization = {ISPRS},
      pages        = {1109--1113},
      abstract     = {The accurate reconstruction of the three-dimensional structure from multiple images is still a challenging problem, so that most current approaches are based on semi-automatic procedures. Therefore the introduction of accurate and reliable automation for this classical problem is one of the key goals of photogrammetric research. This work deals with the problem of matching points and lines across multiple views, in order to gain a highly accurate reconstruction of the depicted object in three-dimensional space. In order to achieve this goal, a novel framework is introduced, that draws a sharp boundary between feature extraction, feature matching based on geometric constraints and feature matching based on radiometric constraints. The isolation of this three parts allows direct control and therefore better understanding of the different kinds of influences on the results. Most image feature matching approaches heavily depend on the radiometric properties of the features and only incorporate geometry information to improve performance and stability. The extracted radiometric descriptors of the features often assume a local planar or smooth object, which is by definition neither present at object corners nor edges. Therefore it would be desirable to use only descriptors that are rigorously founded for the given object model. Unfortunately the task of feature matching based on radiometric properties becomes extremely difficult for this much weaker descriptors. Hence a key feature of the presented framework is the consistent and rigorous use of statistical properties of the extracted geometric entities in the matching process, allowing a unified algorithm for matching points and lines in multiple views using solely the geometric properties of the extracted features. The results are stabilized by the use of many images to compensate for the lack of radiometric information. 
Radiometric descriptors may be consistently included into the framework for stabilization as well. Results from the application of the presented framework to the task of fully automatic reconstruction of points and lines from multiple images are shown.},
      file         = {beder04.unified.pdf:http\://www.ipb.uni-bonn.de/papers/2004/beder04.unified.pdf:PDF},
      url          = {https://www.ipb.uni-bonn.de/pdfs/Beder2004Unified.pdf},
    }

  • T. Dickscheid, “Automatische Referenzpunktverfeinerung in Panoramabildern mittels SIFT-Operator,” Bachelor Thesis Master Thesis, 2004.
    [BibTeX]
    [none]
    @mastersthesis{dickscheid2004automatische,
      author   = {Dickscheid, Timo},
      title    = {Automatische Referenzpunktverfeinerung in Panoramabildern mittels SIFT-Operator},
      year     = {2004},
      type     = {Bachelor Thesis},
      note     = {Betreuung: Dipl.-Inf. Detlev Droege},
      abstract = {[none]},
      city     = {Bonn},
    }

  • W. Förstner, “Uncertainty and Projective Geometry,” in Handbook of Computational Geometry for Pattern Recognition, Computer Vision, Neurocomputing and Robotics, E. Bayro-Corrochano, Ed., Springer, 2004, p. 493–535. doi:10.1007/3-540-28247-5_15
    [BibTeX] [PDF]

    Geometric reasoning in Computer Vision always is performed under uncertainty. The great potential of both, projective geometry and statistics, can be integrated easily for propagating uncertainty through reasoning chains, for making decisions on uncertain spatial relations and for optimally estimating geometric entities or transformations. This is achieved by (1) exploiting the potential of statistical estimation and testing theory and by (2) choosing a representation of projective entities and relations which supports this integration. The redundancy of the representation of geometric entities with homogeneous vectors and matrices requires a discussion on the equivalence of uncertain projective entities. The multi-linearity of the geometric relations leads to simple expressions also in the presence of uncertainty. The non-linearity of the geometric relations finally requires to analyze the degree of approximation as a function of the noise level and of the embedding of the vectors in projective spaces. The paper discusses a basic link of statistics and projective geometry, based on a carefully chosen representation, and collects the basic relations in 2D and 3D and for single view geometry.

    @incollection{forstner2004uncertainty,
    title = {Uncertainty and Projective Geometry},
    author = {F\"orstner, Wolfgang},
    booktitle = {Handbook of Computational Geometry for Pattern Recognition, Computer Vision, Neurocomputing and Robotics},
    publisher = {Springer},
    year = {2004},
    editor = {Bayro-Corrochano, E.},
    pages = {493--535},
    abstract = {Geometric reasoning in Computer Vision always is performed under uncertainty. The great potential of both, projective geometry and statistics, can be integrated easily for propagating uncertainty through reasoning chains, for making decisions on uncertain spatial relations and for optimally estimating geometric entities or transformations. This is achieved by (1) exploiting the potential of statistical estimation and testing theory and by (2) choosing a representation of projective entities and relations which supports this integration. The redundancy of the representation of geometric entities with homogeneous vectors and matrices requires a discussion on the equivalence of uncertain projective entities. The multi-linearity of the geometric relations leads to simple expressions also in the presence of uncertainty. The non-linearity of the geometric relations finally requires to analyze the degree of approximation as a function of the noise level and of the embedding of the vectors in projective spaces. The paper discusses a basic link of statistics and projective geometry, based on a carefully chosen representation, and collects the basic relations in 2D and 3D and for single view geometry.},
    doi = {10.1007/3-540-28247-5_15},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2004Uncertainty.pdf},
    }

  • W. Förstner, “Projective Geometry for Photogrammetric Orientation Procedures II,” in Proc. 20th ISPRS Congress, Istanbul, Turkey, 2004.
    [BibTeX] [PDF]
    [none]
    @inproceedings{forstner2004projective,
      author     = {F\"orstner, Wolfgang},
      title      = {Projective Geometry for Photogrammetric Orientation Procedures II},
      booktitle  = {Proc. 20th ISPRS Congress},
      year       = {2004},
      address    = {Istanbul, Turkey},
      abstract   = {[none]},
      city       = {Bonn},
      proceeding = {Tutorial notes from the tutorial held at the ISPRS Congress Istanbul},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner2004Projectivea.pdf},
    }

  • W. Förstner, “Projective Geometry for Photogrammetric Orientation Procedures I,” in Tutorial notes from the tutorial held at the ISPRS Congress, Istanbul, Turkey, 2004.
    [BibTeX] [PDF]
    [none]
    @inproceedings{forstner2004projectivea,
      author     = {F\"orstner, Wolfgang},
      title      = {Projective Geometry for Photogrammetric Orientation Procedures I},
      booktitle  = {Tutorial notes from the tutorial held at the ISPRS Congress},
      year       = {2004},
      address    = {Istanbul, Turkey},
      abstract   = {[none]},
      city       = {Bonn},
      proceeding = {Tutorial notes from the tutorial held at the ISPRS Congress Istanbul},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner2004Projective.pdf},
    }

  • T. Läbe and W. Förstner, “Geometric Stability of Low-Cost Digital Consumer Cameras,” in Proc. 20th ISPRS Congress, Istanbul, Turkey, 2004, p. 528–535.
    [BibTeX] [PDF]

    During the last years the number of available low-cost digital consumer cameras has significantly increased while their prices decrease. Therefore for many applications with no high-end accuracy requirements it is an important consideration whether to use low-cost cameras. This paper investigates in the use of consumer cameras for photogrammetric measurements and vision systems. An important aspect of the suitability of these cameras is their geometric stability. Two aspects should be considered: The change of calibration parameters when using the camera’s features such as zoom or auto focus and the time invariance of the calibration parameters. Therefore laboratory calibrations of different cameras have been carried out at different times. The resulting calibration parameters, especially the principal distance and the principal point, and their accuracies are given. The usefulness of the information given in the image header, especially the focal length, is compared to the results of the calibration.

    @inproceedings{labe2004geometric,
      author     = {L\"abe, Thomas and F\"orstner, Wolfgang},
      title      = {Geometric Stability of Low-Cost Digital Consumer Cameras},
      booktitle  = {Proc. 20th ISPRS Congress},
      year       = {2004},
      address    = {Istanbul, Turkey},
      pages      = {528--535},
      abstract   = {During the last years the number of available low-cost digital consumer cameras has significantly increased while their prices decrease. Therefore for many applications with no high-end accuracy requirements it is an important consideration whether to use low-cost cameras. This paper investigates in the use of consumer cameras for photogrammetric measurements and vision systems. An important aspect of the suitability of these cameras is their geometric stability. Two aspects should be considered: The change of calibration parameters when using the camera's features such as zoom or auto focus and the time invariance of the calibration parameters. Therefore laboratory calibrations of different cameras have been carried out at different times. The resulting calibration parameters, especially the principal distance and the principal point, and their accuracies are given. The usefulness of the information given in the image header, especially the focal length, is compared to the results of the calibration.},
      city       = {Bonn},
      proceeding = {Proc. of XXth ISPRS Congress 2004},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Labe2004Geometric.pdf},
    }

  • A. Löw, B. Waske, R. Ludwig, and W. Mauser, “Derivation of near surface soil moisture patterns from multiscale Envisat ASAR data,” in EGU General Assembly, Geophysical Research Abstracts, 2004. doi:10.1109/IGARSS.2005.1526194
    [BibTeX]

    Water and energy fluxes at the interface between the land surface and atmosphere are strongly depending on the surface soil moisture content which is highly variable in space and time. It has been shown in numerous studies that microwave remote sensing can provide spatially distributed patterns of surface soil moisture. New sensor generations as ENVISAT ASAR or RADARSAT allow for image acquisitions in different imaging modes and geometries. Imaging modes with large area coverage capabilities as the wide swath mode of ENVISAT ASAR are of special interest for practical applications in this context. The paper presents a semiempirical soil moisture inversion scheme for ENVISAT ASAR data. Different land cover types as well as mixed image pixels are taken into account in the soil moisture retrieval process. The inversion results are validated against in situ measurements and a sensitivity analysis of the model is conducted.

    @inproceedings{low2004derivation,
    title = {Derivation of near surface soil moisture patterns from multiscale Envisat ASAR data},
    author = {L\"ow, A. and Waske, Bj\"orn and Ludwig, R. and Mauser, W.},
    booktitle = {EGU General Assembly, Geophysical Research Abstracts},
    year = {2004},
    abstract = {Water and energy fluxes at the interface between the land surface and atmosphere are strongly depending on the surface soil moisture content which is highly variable in space and time. It has been shown in numerous studies that microwave remote sensing can provide spatially distributed patterns of surface soil moisture. New sensor generations as ENVISAT ASAR or RADARSAT allow for image acquisitions in different imaging modes and geometries. Imaging modes with large area coverage capabilities as the wide swath mode of ENVISAT ASAR are of special interest for practical applications in this context. The paper presents a semiempirical soil moisture inversion scheme for ENVISAT ASAR data. Different land cover types as well as mixed image pixels are taken into account in the soil moisture retrieval process. The inversion results are validated against in situ measurements and a sensitivity analysis of the model is conducted.},
    doi = {10.1109/IGARSS.2005.1526194},
    internal-note = {NOTE(review): this DOI prefix points to an IEEE IGARSS 2005 publication, but the entry is a 2004 EGU General Assembly abstract -- verify the DOI actually belongs to this work},
    owner = {waske},
    timestamp = {2012.09.05},
    }

  • A. Löw, B. Waske, R. Ludwig, and W. Mauser, “Derivation of near surface soil moisture patterns from ENVISAT ASAR Wide Swath data,” in 4th International Symposium on Retrieval of Bio- and Geophysical parameters from SAR data for land Applications, 2004.
    [BibTeX]
    [none]
    @inproceedings{low2004derivationa,
      author    = {L\"ow, A. and Waske, Bj\"orn and Ludwig, R. and Mauser, W.},
      title     = {Derivation of near surface soil moisture patterns from ENVISAT ASAR Wide Swath data},
      booktitle = {4th International Symposium on Retrieval of Bio- and Geophysical parameters from SAR data for land Applications},
      year      = {2004},
      abstract  = {[none]},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • A. Löw, B. Waske, R. Ludwig, and W. Mauser, “Derivation of hydrological parameters from ENVISAT ASAR wide swath data,” in IEEE International Geoscience and Remote Sensing Symposium (IGARSS), 2004. doi:10.1109/IGARSS.2004.1370469
    [BibTeX]

    Spatially distributed information about the current state of the land surface can be obtained from remote sensing measurements. These may be used with great benefit for the understanding of hydrological processes on the landscape level, where in situ measurements must fail due to lacking spatial coverage. The potential to quantify soil moisture conditions of the top soil layer, as well as the derivation of snow parameters by means of active microwave imagery has been successfully demonstrated in numerous studies. In contrast to earlier and rather experimental research efforts, data acquired from the ENVISAT ASAR sensor firstly enables to continuously monitor large areas with high temporal frequency and high spatial resolution. The different operation modes of ASAR allow the derivation of soil moisture maps on both, the field and the regional scale. The paper presents new methods to derive soil moisture and snow covered area information from ASAR wide swath (WSM) datasets. The presented approaches allocate a robust, yet practicable and reliable technique to derive near-surface soil moisture and snow patterns, being the key prerequisite for an operational application in hydrologic modelling.

    @inproceedings{low2004derivationb,
      author    = {L\"ow, A. and Waske, Bj\"orn and Ludwig, R. and Mauser, W.},
      title     = {Derivation of hydrological parameters from ENVISAT ASAR wide swath data},
      booktitle = {IEEE International Geoscience and Remote Sensing Symposium (IGARSS)},
      year      = {2004},
      abstract  = {Spatially distributed information about the current state of the land surface can be obtained from remote sensing measurements. These may be used with great benefit for the understanding of hydrological processes on the landscape level, where in situ measurements must fail due to lacking spatial coverage. The potential to quantify soil moisture conditions of the top soil layer, as well as the derivation of snow parameters by means of active microwave imagery has been successfully demonstrated in numerous studies. In contrast to earlier and rather experimental research efforts, data acquired from the ENVISAT ASAR sensor firstly enables to continuously monitor large areas with high temporal frequency and high spatial resolution. The different operation modes of ASAR allow the derivation of soil moisture maps on both, the field and the regional scale. The paper presents new methods to derive soil moisture and snow covered area information from ASAR wide swath (WSM) datasets. The presented approaches allocate a robust, yet practicable and reliable technique to derive near-surface soil moisture and snow patterns, being the key prerequisite for an operational application in hydrologic modelling.},
      doi       = {10.1109/IGARSS.2004.1370469},
      keywords  = {ENVISAT ASAR; WSM; active microwave imagery; hydrological process; parameter inversion; remote sensing; snow covered area; soil moisture; synthetic aperture radar; wide swath data; data acquisition; hydrological techniques; microwave imaging; microwave measurement; remote sensing by radar; snow; soil; synthetic aperture radar;},
      owner     = {waske},
      timestamp = {2012.09.05},
    }

  • M. Luxen, “Performance Evaluation in Natural and Controlled Environments applied to Feature Extraction Procedures,” in Proc. 20th ISPRS Congress, Istanbul, Turkey, Istanbul, Turkey, 2004, p. 1061–1067.
    [BibTeX] [PDF]

    The paper highlights approaches to reference data acquisition in real environments for the purpose of performance evaluation of image analysis procedures. Reference data for the input and for the output of an algorithm is obtained by a) exploiting the noise characteristics of Gaussian image pyramids and b) exploiting multiple views. The approaches are employed exemplarily in the context of evaluating low level feature extraction algorithms.

    @inproceedings{luxen2004performance,
    title = {Performance Evaluation in Natural and Controlled Environments applied to Feature Extraction Procedures},
    author = {Luxen, Marc},
    booktitle = {Proc. 20th ISPRS Congress},
    year = {2004},
    address = {Istanbul, Turkey},
    editor = {Altan, M. Orhan},
    number = {B3},
    organization = {ISPRS},
    pages = {1061--1067},
    series = {The International Archives of The Photogrammetry, Remote Sensing and Spatial Information Sciences},
    volume = {XXXV, Part B3},
    abstract = {The paper highlights approaches to reference data acquisition in real environments for the purpose of performance evaluation of image analysis procedures. Reference data for the input and for the output of an algorithm is obtained by a) exploiting the noise characteristics of Gaussian image pyramids and b) exploiting multiple views. The approaches are employed exemplarily in the context of evaluating low level feature extraction algorithms.},
    file = {luxen04.performance.pdf:http\://www.ipb.uni-bonn.de/papers/2004/luxen04.performance.pdf:PDF},
    postscript = {https://www.ipb.uni-bonn.de/papers/2004/luxen04.performance.ps.gz},
    url = {https://www.ipb.uni-bonn.de/pdfs/Luxen2004Performance.pdf},
    }

  • J. Meidow, “Calibration of Stationary Cameras by Observing Objects of Equal Heights on a Ground Plane,” in Proc. 20th ISPRS Congress, Istanbul, Turkey, Istanbul, Turkey, 2004, p. 1067–1072.
    [BibTeX] [PDF]

    With the increasing number of cameras the need for plug-and-play calibration procedures arises to realize a subsequent automatic geometric evaluation of observed scenes. An easy calibration procedure is proposed for a non-zooming stationary camera observing objects of initially equal and known heights above a ground plane. The image coordinates of the corresponding foot and head points of these objects serve as observations. For the interior and exterior orientation of the camera a minimal parametrization is introduced with the height of the camera above the ground plane, its pitch and roll angle and the principal distance. With the idea of corresponding foot and head trajectories being homologue, the situation can be reformulated with a virtual second camera observing the scene. Therefore a plane induced homography can be established for the observation model. This special planar homology can be parametrisied with the unknown calibration quantities. Initially the calibration is estimated by observing foot and head points of objects with known heights. In the subsequent evaluation phase the height and positions of unknown objects can be determined. With the same procedure the calibration can be checked and updated if needed. The approach is evaluated with a real scene.

    @inproceedings{meidow2004calibration,
    title = {Calibration of Stationary Cameras by Observing Objects of Equal Heights on a Ground Plane},
    author = {Meidow, Jochen},
    booktitle = {Proc. 20th ISPRS Congress},
    year = {2004},
    address = {Istanbul, Turkey},
    organization = {ISPRS},
    pages = {1067--1072},
    abstract = {With the increasing number of cameras the need for plug-and-play calibration procedures arises to realize a subsequent automatic geometric evaluation of observed scenes. An easy calibration procedure is proposed for a non-zooming stationary camera observing objects of initially equal and known heights above a ground plane. The image coordinates of the corresponding foot and head points of these objects serve as observations. For the interior and exterior orientation of the camera a minimal parametrization is introduced with the height of the camera above the ground plane, its pitch and roll angle and the principal distance. With the idea of corresponding foot and head trajectories being homologue, the situation can be reformulated with a virtual second camera observing the scene. Therefore a plane induced homography can be established for the observation model. This special planar homology can be parametrisied with the unknown calibration quantities. Initially the calibration is estimated by observing foot and head points of objects with known heights. In the subsequent evaluation phase the height and positions of unknown objects can be determined. With the same procedure the calibration can be checked and updated if needed. The approach is evaluated with a real scene.},
    file = {meidow04.calibration.pdf:http\://www.ipb.uni-bonn.de/papers/2004/meidow04.calibration.pdf:PDF},
    url = {https://www.ipb.uni-bonn.de/pdfs/Meidow2004Calibration.pdf},
    }

  • H. Schuster, “Segmentation Of LIDAR Data Using The Tensor Voting Framework,” in Proc. 20th ISPRS Congress, Istanbul, Turkey, Istanbul, Turkey, 2004, p. 1073–1078.
    [BibTeX] [PDF]

    We present an investigation on the use of Tensor Voting for categorizing LIDAR data into outliers, line elements (e.g. high-voltage power lines), surface patches (e.g. roofs) and volumetric elements (e.g. vegetation). The Reconstruction of man-made objects is a main task of photogrammetry. With the increasing quality and availability of LIDAR sensors, range data is becoming more and more important. With LIDAR sensors it is possible to quickly aquire huge amounts of data. But in contrast to classical systems, where the measurement points are chosen by an operator, the data points do not explicitly correspond to meaningful points of the object, i.e. edges, corners, junctions. To extract these features it is necessary to segment the data into homogeneous regions wich can be processed afterwards. Our approach consists of a two step segmentation. The first one uses the Tensor Voting algorithm. It encodes every data point as a particle which sends out a vector field. This can be used to categorize the pointness, edgeness and surfaceness of the data points. After the categorization of the given LIDAR data points also the regions between the data points are rated. Meaningful regions like edges and junctions, given by the inherent structure of the data, are extracted. In a second step the so labeled points are merged due to a similarity constraint. This similarity constraint is based on a minimum description length principle, encoding and comparing different geometrical models. The output of this segmentation consists of non overlapping geometric objects in three dimensional space. The aproach is evaluated with some examples of Lidar data.

    @inproceedings{schuster2004segmentation,
    title = {Segmentation Of {LIDAR} Data Using The Tensor Voting Framework},
    author = {Schuster, Hanns-Florian},
    booktitle = {Proc. 20th ISPRS Congress},
    year = {2004},
    address = {Istanbul, Turkey},
    organization = {ISPRS},
    pages = {1073--1078},
    abstract = {We present an investigation on the use of Tensor Voting for categorizing LIDAR data into outliers, line elements (e.g. high-voltage power lines), surface patches (e.g. roofs) and volumetric elements (e.g. vegetation). The Reconstruction of man-made objects is a main task of photogrammetry. With the increasing quality and availability of LIDAR sensors, range data is becoming more and more important. With LIDAR sensors it is possible to quickly aquire huge amounts of data. But in contrast to classical systems, where the measurement points are chosen by an operator, the data points do not explicitly correspond to meaningful points of the object, i.e. edges, corners, junctions. To extract these features it is necessary to segment the data into homogeneous regions wich can be processed afterwards. Our approach consists of a two step segmentation. The first one uses the Tensor Voting algorithm. It encodes every data point as a particle which sends out a vector field. This can be used to categorize the pointness, edgeness and surfaceness of the data points. After the categorization of the given LIDAR data points also the regions between the data points are rated. Meaningful regions like edges and junctions, given by the inherent structure of the data, are extracted. In a second step the so labeled points are merged due to a similarity constraint. This similarity constraint is based on a minimum description length principle, encoding and comparing different geometrical models. The output of this segmentation consists of non overlapping geometric objects in three dimensional space. The aproach is evaluated with some examples of Lidar data.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schuster2004Segmentation.pdf},
    }

  • C. Stachniss, G. Grisetti, D. Hähnel, and W. Burgard, “Improved Rao-Blackwellized Mapping by Adaptive Sampling and Active Loop-Closure,” in Proc. of SOAVE, Ilmenau, Germany, 2004, p. 1–15.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2004a,
      author    = {Stachniss, C. and Grisetti, G. and H\"{a}hnel, D. and Burgard, W.},
      title     = {Improved Rao-Blackwellized Mapping by Adaptive Sampling and Active Loop-Closure},
      booktitle = soave,
      year      = {2004},
      address   = {Ilmenau, Germany},
      pages     = {1--15},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss04soave.pdf},
    }

  • C. Stachniss, D. Hähnel, and W. Burgard, “Exploration with Active Loop-Closing for FastSLAM,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Sendai, Japan, 2004, p. 1505–1510.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2004,
      author    = {Stachniss, C. and H\"{a}hnel, D. and Burgard, W.},
      title     = {Exploration with Active Loop-Closing for {FastSLAM}},
      booktitle = iros,
      year      = {2004},
      address   = {Sendai, Japan},
      pages     = {1505--1510},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss04iros.pdf},
    }

  • M. Thöle, “Evaluierung verschiedener Ansätze zur Schätzung und Repräsentation unsicherer Geraden im Raum,” Diplomarbeit, Institute of Photogrammetry, University of Bonn, 2004.
    [BibTeX]
    [none]
    @mastersthesis{thole2004evaluierung,
      author   = {Th\"ole, Markus},
      title    = {Evaluierung verschiedener Ans\"atze zur Sch\"atzung und Repr\"asentation unsicherer Geraden im Raum},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2004},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Marc Luxen},
      abstract = {[none]},
      city     = {Bonn},
    }

2003

  • M. Appel and U. Weidner, “A New Approach Towards Quantitative Quality Evaluation of 3D Building Models,” in ISPRS Commission IV Joint Workshop Challenges in Geospatial Analysis, Integration and Visualization II, Stuttgart, 2003.
    [BibTeX] [PDF]

    The need of describing the quality of data ranges from data acquisition to the use of the data in geoinformation systems. The contractor should verify that the data he captured suffices the specifications and the end user wants to know, if the data is suited for a special task at hand. Both are interested in quantifying the quality, possibly by simple and meaningful measures, which can be easily computed without much further efforts prohibitive with respect to involved labour and related costs. Much work has been already done on the standardization of principles of quality evaluation, reports and metadata (c.f. ISO standards 19113, 19114 and 19115), but only few contributions deal with the question of defining quality measures for a specific application, which possibly may be generalized for others as well. A recent project in cooperation with the Surveying Office of North Rhine-Westphalia investigates the topic of quality evaluation of photogrammetrically captured building models with the aim to identify useful quality measures which can be used for contract specificatios and to implement an approach for automated quality control based on a comparision of measurement and reference data. This paper presents the concept of the approach and first results.

    @inproceedings{appel2003new,
    title = {A New Approach Towards Quantitative Quality Evaluation of {3D} Building Models},
    author = {Appel, Mirko and Weidner, Uwe},
    booktitle = {ISPRS Commission IV Joint Workshop Challenges in Geospatial Analysis, Integration and Visualization II},
    year = {2003},
    address = {Stuttgart},
    abstract = {The need of describing the quality of data ranges from data acquisition to the use of the data in geoinformation systems. The contractor should verify that the data he captured suffices the specifications and the end user wants to know, if the data is suited for a special task at hand. Both are interested in quantifying the quality, possibly by simple and meaningful measures, which can be easily computed without much further efforts prohibitive with respect to involved labour and related costs. Much work has been already done on the standardization of principles of quality evaluation, reports and metadata (c.f. ISO standards 19113, 19114 and 19115), but only few contributions deal with the question of defining quality measures for a specific application, which possibly may be generalized for others as well. A recent project in cooperation with the Surveying Office of North Rhine-Westphalia investigates the topic of quality evaluation of photogrammetrically captured building models with the aim to identify useful quality measures which can be used for contract specificatios and to implement an approach for automated quality control based on a comparision of measurement and reference data. This paper presents the concept of the approach and first results.},
    city = {Bonn},
    proceeding = {ISPRS},
    url = {https://www.ipb.uni-bonn.de/pdfs/Appel2003New.pdf},
    }

  • W. Förstner, “Notions of Scale in Geosciences,” in Dynamics of Multi-Scale Earth Systems, 2003, p. 17–39. doi:10.1007/3-540-45256-7_2
    [BibTeX] [PDF]

    The paper discusses the notion scale within geosciences. The high complexity of the developed models and the wide range of participating disciplines goes along with different notions of scale used during data acquisition and model building. The paper collects the different notions of scale and shows the close relations between the different notions: map scale, resolution, window size, average wavelength, level of aggregation, level of abstraction. Finally the problem of identifying scale in models is discussed. A synopsis of the continuous measures for scale links the different notions.

    @incollection{forstner2003notions,
    title = {Notions of Scale in Geosciences},
    author = {F\"orstner, Wolfgang},
    booktitle = {Dynamics of Multi-Scale Earth Systems},
    publisher = {Springer},
    year = {2003},
    editor = {Neugebauer, Horst J. and Simmer, Clemens},
    pages = {17--39},
    abstract = {The paper discusses the notion scale within geosciences. The high complexity of the developed models and the wide range of participating disciplines goes along with different notions of scale used during data acquisition and model building. The paper collects the different notions of scale shows the close relations between the different notions: map scale, resolution, window size, averqage wavelength, level of aggregation, level of abstraction. Finally the problem of identifying scale in models is discussed. A synopsis of the continuous measures for scale links the different notions.},
    city = {Bonn},
    doi = {10.1007/3-540-45256-7_2},
    proceeding = {Dynamics of Multi-Scale Earth Systems},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2003Notions.pdf},
    }

  • W. Förstner and T. Läbe, “Learning Optimal Parameters for Self-diagnosis in a System for Automatic Exterior Orientation,” in Vision Systems (ICVS) 2003, Graz, 2003, p. 236–246. doi:10.1007/3-540-36592-3_23
    [BibTeX] [PDF]

    The paper describes the automatic learning of parameters for self-diagnosis of a system for automatic orientation of single aerial images used by the State Survey Department of Northrhine–Westfalia. The orientation is based on 3D lines as ground control features, and uses a sequence of probabilistic clustering, search and ML-estimation for robustly estimating the 6 parameters of the exterior orientation of an aerial image. The system is interpreted as a classifier, making an internal evaluation of its success. The classification is based on a number of parameters possibly relevant for self-diagnosis. A hand designed classifier reached 11% false negatives and 2% false positives on appr. 17000 images. A first version of a new classifier using support vector machines is evaluated. Based on appr. 650 images the classifier reaches 2 % false negatives and 4% false positives, indicating an increase in performance.

    @inproceedings{forstner2003learning,
    title = {Learning Optimal Parameters for Self-diagnosis in a System for Automatic Exterior Orientation},
    author = {F\"orstner, Wolfgang and L\"abe, Thomas},
    booktitle = {Computer Vision Systems (ICVS) 2003},
    year = {2003},
    address = {Graz},
    editor = {Crowley, James L. and Piater, Justus H. and Vincze, M. and Paletta, L.},
    pages = {236--246},
    abstract = {The paper describes the automatic learning of parameters for self-diagnosis of a system for automatic orientation of single aerial images used by the State Survey Department of Northrhine--Westfalia. The orientation is based on 3D lines as ground control features, and uses a sequence of probabilistic clustering, search and ML-estimation for robustly estimating the 6 parameters of the exterior orientation of an aerial image. The system is interpreted as a classifier, making an internal evaluation of its success. The classification is based on a number of parameters possibly relevant for self-diagnosis. A hand designed classifier reached 11% false negatives and 2% false positives on appr. 17000 images. A first version of a new classifier using support vector machines is evaluated. Based on appr. 650 images the classifier reaches 2 % false negatives and 4% false positives, indicating an increase in performance.},
    city = {Bonn},
    doi = {10.1007/3-540-36592-3_23},
    proceeding = {Computer Vision Systems (ICVS) 2003},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2003Learning.pdf},
    }

  • M. Luxen, “Variance Component Estimation in Performance Characteristics Applied to Feature Extraction Procedures,” in Pattern Recognition, 25th DAGM Symposium, Magdeburg, Germany, 2003, p. 498–506. doi:10.1007/978-3-540-45243-0_64
    [BibTeX] [PDF]

    The paper proposes variance component estimation (VCE) for empirical quality evaluation in computer vision. An outline is given for the scope of variance component estimation in the context of quality evaluation. The principle of variance component estimation is explained and the approach is applied to results of low level feature extraction. Ground truth is only partly needed for estimating the precision, accuracy and bias of extracted points and straight line segments. The results of diverse feature extraction modules are compared.

    @InProceedings{luxen2003variance,
    title = {Variance Component Estimation in Performance Characteristics Applied to Feature Extraction Procedures},
    author = {Luxen, Marc},
    booktitle = {Pattern Recognition, 25th DAGM Symposium},
    year = {2003},
    address = {Magdeburg, Germany},
    editor = {Bernd Michaelis and Gerald Krell},
    month = sep,
    pages = {498--506},
    publisher = {Springer},
    series = {Lecture Notes in Computer Science},
    volume = {2781},
    abstract = {The paper proposes variance component estimation (VCE) for empirical quality evaluation in computer vision. An outline is given for the scope of variance component estimation in the context of quality evaluation. The principle of variance component estimation is explained and the approach is applied to results of low level feature extraction. Ground truth is only partly needed for estimating the precision, accuracy and bias of extracted points and straight line segments. The results of diverse feature extraction modules are compared.},
    bibsource = {DBLP, https://dblp.uni-trier.de},
    doi = {10.1007/978-3-540-45243-0_64},
    isbn = {3-540-40861-4},
    url = {https://www.ipb.uni-bonn.de/pdfs/Luxen2003Variance.pdf},
    }

  • M. Luxen and A. Brunn, “Parameterschätzung aus unvollständigen Beobachtungsdaten mittels des EM-Algorithmus,” Zeitschrift für Geodäsie, Geoinformation und Landmanagement (ZfV), iss. 02, p. 71–79, 2003.
    [BibTeX] [PDF]

    The paper gives an introduction into the problem of parameter estimation from incomplete data and presents the Expectation Maximization Algorithm as a method for solving such problems. The algorithm is put in relation to geodetic estimation problems. Its practicability is shown by an example of line extraction from digital images.

    @article{luxen2003parameterschatzung,
      author   = {Luxen, Marc and Brunn, Ansgar},
      title    = {Parametersch\"atzung aus unvollst\"andigen Beobachtungsdaten mittels des EM-Algorithmus},
      journal  = {Zeitschrift f\"ur Geod\"asie, Geoinformation und Landmanagement (ZfV)},
      number   = {02},
      pages    = {71--79},
      year     = {2003},
      abstract = {The paper gives an introduction into the problem of parameter estimation from incomplete data and presents the Expectation Maximization Algorithm as a method for solving such problems. The algorithm is put in relation to geodetic estimation problems. Its practicability is shown by an example of line extraction from digital images.},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Luxen2003Parameterschatzung.pdf},
    }

  • H. Schuster and W. Förstner, “Segmentierung, Rekonstruktion und Datenfusion bei der Objekterfassung mit Entfernungsdaten – ein Überblick,” in Proc. 2. Oldenburger 3D-Tage, Oldenburg, 2003.
    [BibTeX] [PDF]

    Mit dem Aufkommen von flächig erfaßten Entfernungsdaten im Vermessungswesen steht ein Paradigmenwechsel in der Auswertung und Verarbeitung dieser Daten an, vergleichbar dem Übergang von der analytischen zur digitalen Photogrammetrie mit der Verfügbarkeit digitaler bzw. digitalisierter Bilder. Der vorliegende Beitrag gibt einen Überblick über Verfahren zur Fusion und Segmentierung von Entfernungsdaten und verdeutlicht Potentiale zur weiteren Automatisierung

    @InProceedings{schuster2003segmentierung,
    title = {Segmentierung, Rekonstruktion und Datenfusion bei der Objekterfassung mit Entfernungsdaten -- ein \"Uberblick},
    author = {Schuster, Hanns-Florian and F\"orstner, Wolfgang},
    booktitle = {Proc. 2. Oldenburger 3D-Tage},
    year = {2003},
    address = {Oldenburg},
    abstract = {Mit dem Aufkommen von fl\"achig erfa{\ss}ten Entfernungsdaten im Vermessungswesen steht ein Paradigmenwechsel in der Auswertung und Verarbeitung dieser Daten an, vergleichbar dem \"Ubergang von der analytischen zur digitalen Photogrammetrie mit der Verf\"ugbarkeit digitaler bzw. digitalisierter Bilder. Der vorliegende Beitrag gibt einen \"Uberblick \"uber Verfahren zur Fusion und Segmentierung von Entfernungsdaten und verdeutlicht Potentiale zur weiteren Automatisierung},
    city = {Bonn},
    proceeding = {Proc. 2. Oldenburger 3D-Tage},
    url = {https://www.ipb.uni-bonn.de/pdfs/Schuster2003Segmentierung.pdf},
    }

  • C. Stachniss and W. Burgard, “Exploring Unknown Environments with Mobile Robots using Coverage Maps,” in ijcai, Acapulco, Mexico, 2003, p. 1127–1132.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2003,
      author    = {Stachniss, C. and Burgard, W.},
      title     = {Exploring Unknown Environments with Mobile Robots using Coverage Maps},
      booktitle = ijcai,
      address   = {Acapulco, Mexico},
      pages     = {1127--1132},
      year      = {2003},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss03ijcai.pdf},
    }

  • C. Stachniss and W. Burgard, “Using Coverage Maps to Represent the Environment of Mobile Robots,” in Proc. of the European Conf. on Mobile Robots (ECMR), Radziejowice, Poland, 2003, p. 59–64.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2003a,
      author    = {Stachniss, C. and Burgard, W.},
      title     = {Using Coverage Maps to Represent the Environment of Mobile Robots},
      booktitle = ecmr,
      address   = {Radziejowice, Poland},
      pages     = {59--64},
      year      = {2003},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss03ecmr.pdf},
    }

  • C. Stachniss and W. Burgard, “Mapping and Exploration with Mobile Robots using Coverage Maps,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Las Vegas, NV, USA, 2003, p. 476–481.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2003b,
      author    = {Stachniss, C. and Burgard, W.},
      title     = {Mapping and Exploration with Mobile Robots using Coverage Maps},
      booktitle = iros,
      address   = {Las Vegas, NV, USA},
      pages     = {476--481},
      year      = {2003},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss03iros.pdf},
    }

  • C. Stachniss, D. Hähnel, and W. Burgard, “Grid-based FastSLAM and Exploration with Active Loop Closing,” in Online Proc. of the Dagstuhl Seminar on Robot Navigation (Dagstuhl Seminar 03501), Dagstuhl, Germany, 2003.
    [BibTeX]
    [none]
    @inproceedings{stachniss2003c,
      author    = {Stachniss, C. and H\"{a}hnel, D. and Burgard, W.},
      title     = {Grid-based {FastSLAM} and Exploration with Active Loop Closing},
      booktitle = {Online Proc. of the Dagstuhl Seminar on Robot Navigation (Dagstuhl Seminar 03501)},
      address   = {Dagstuhl, Germany},
      year      = {2003},
      abstract  = {[none]},
      timestamp = {2014.04.24},
    }

2002

  • M. Appel and W. Förstner, “Scene Constraints for Direct Single Image Orientation with Selfdiagnosis,” in Photogrammetric Computer Vision, Graz, 2002, p. 42–49.
    [BibTeX] [PDF]

    In this paper we present a new method for single image orientation using an orthographic drawing or map of the scene. Environments which are dominated by man made objects, such as industrial facilities or urban scenes, are very rich of vertical and horizontal structures. These scene constraints reflect in symbols in an associated drawing. For example, vertical lines in the scene are usually marked as points in a drawing. The resulting orientation may be used in augmented reality systems or for initiating a subsequent bundle adjustment of all available images. In this paper we propose to use such scene constraints taken from a drawing to estimate the camera orientation. We use observed vertical lines, horizontal lines, and points to estimate the projection matrix P of the image. We describe the constraints in terms of projective geometry which makes them straightforward and very transparent. In contrast to the work of Bondyfalatetal 2001, we give a direct solution for P without using the fundamental matrix between image and map as we do not need parallelity constraints between lines in a vertical plane other than for horizontal lines, nor observed perpendicular lines. We present both a direct solution for P and a statistically optimal, iterative solution, which takes the uncertainties of the contraints and the observations in the image and the drawing into account. It is a simplifying modification of the eigenvalue method of Matei/Meer 1997. The method allows to evaluate the results statistically, namely to verify the used projection model and the assumed statistical properties of the measured image and map quantities and to validate the achieved accuracy of the estimated projection matrix P. To demonstrate the feasibility of the approach, we present results of the application of our method to both synthetic data and real scenes in industrial environment. Statistical tests show the performance and prove the rigour of the new method.

    @InProceedings{appel2002scene,
    title = {Scene Constraints for Direct Single Image Orientation with Selfdiagnosis},
    author = {Appel, Mirko and F\"orstner, Wolfgang},
    booktitle = {Photogrammetric Computer Vision},
    year = {2002},
    address = {Graz},
    editor = {F. Leberl and R. Kalliany},
    pages = {42--49},
    volume = {A},
    abstract = {In this paper we present a new method for single image orientation using an orthographic drawing or map of the scene. Environments which are dominated by man made objects, such as industrial facilities or urban scenes, are very rich of vertical and horizontal structures. These scene constraints reflect in symbols in an associated drawing. For example, vertical lines in the scene are usually marked as points in a drawing. The resulting orientation may be used in augmented reality systems or for initiating a subsequent bundle adjustment of all available images. In this paper we propose to use such scene constraints taken from a drawing to estimate the camera orientation. We use observed vertical lines, horizontal lines, and points to estimate the projection matrix P of the image. We describe the constraints in terms of projective geometry which makes them straightforward and very transparent. In contrast to the work of Bondyfalatetal 2001, we give a direct solution for P without using the fundamental matrix between image and map as we do not need parallelity constraints between lines in a vertical plane other than for horizontal lines, nor observed perpendicular lines. We present both a direct solution for P and a statistically optimal, iterative solution, which takes the uncertainties of the contraints and the observations in the image and the drawing into account. It is a simplifying modification of the eigenvalue method of Matei/Meer 1997. The method allows to evaluate the results statistically, namely to verify the used projection model and the assumed statistical properties of the measured image and map quantities and to validate the achieved accuracy of the estimated projection matrix P. To demonstrate the feasibility of the approach, we present results of the application of our method to both synthetic data and real scenes in industrial environment. Statistical tests show the performance and prove the rigour of the new method.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Appel2002Scene.pdf},
    }

  • C. Beder, “An Optimisation Method for Obtaining the Fundamental Matrix from an Image Pair,” Diplomarbeit Master Thesis, 2002.
    [BibTeX]

    Die Bestimmung der relativen Lage zweier Kameras zum Zeitpunkt der Aufnahmen, die sog. relative Orientierung der Bilder, stellt ein klassisches Problem der Photogrammetrie dar und ist Grundlage jeder Auswertung von Stereobildpaaren. Eine vollautomatische allgemeine Lösung dieses Problems existiert bisher nicht. Grund ist die Schwierigkeit, homologe Bilddetails, die sich auf denselben Objektpunkt beziehen, automatisch und unter beliebigen unbekannten Perspektiven zu finden. Sobald homologe Punkte vorliegen, existieren klassische Verfahren zur Bestimmung der sog. Fundamentalmatrix, die die gesamte Information der relativen Orientierung zweier geradentreu abbildender Kameras enthält. Die Fundamentalmatrix ermöglicht insbesondere die Formulierung eines geometrischen Kriteriums für Punkte, die sog. Koplanaritätsbedingung. Als zweites Kriterium für die Homologie von Punkten verwendet man ein radiometrisches Kriterium, meist die Ähnlichkeit der Intensitäts- oder Farbverteilung in der Umgebung der Punkte. Sie ist jedoch nur unter eingeschränkten Bedingungen leicht zu bestimmen, nicht etwa bei partiellen Verdeckungen. Ziel der Arbeit ist nun die Bestimmung der Fundamentalmatrix als Optimierungsverfahren zu formulieren, das gelichzeitig die geometrischen und die radiometrischen Bedingungen berücksichtigt. Intensitätsunterschiede und Abweichungen von der Koplanaritätsbedingung werden durch die Formulierung von normierten chi-quadrat-verteilten Distanzmaßen integriert. Dabei werden die Umgebungen homologer Punkte maßstabs- und rotationsinvariant verglichen. Die Zuordnung wird mit einem Annealingverfahren bestimmt. Die Leistungsfähigkeit und die Grenzen des Verfahrens wird an hand eines künstlichen Beispiels, für das die wahren Werte der Fundamentalmatrix bekannt sind, demonstriert.

    @MastersThesis{beder2002optimisation,
    title = {An Optimisation Method for Obtaining the Fundamental Matrix from an Image Pair},
    author = {Beder, Christian},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {2002},
    note = {Betreuung: Prof. Dr. Joachim Buhmann, Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Diplomarbeit},
    abstract = {Die Bestimmung der relativen Lage zweier Kameras zum Zeitpunkt der Aufnahmen, die sog. relative Orientierung der Bilder, stellt ein klassisches Problem der Photogrammetrie dar und ist Grundlage jeder Auswertung von Stereobildpaaren. Eine vollautomatische allgemeine L\"osung dieses Problems existiert bisher nicht. Grund ist die Schwierigkeit, homologe Bilddetails, die sich auf denselben Objektpunkt beziehen, automatisch und unter beliebigen unbekannten Perspektiven zu finden. Sobald homologe Punkte vorliegen, existieren klassische Verfahren zur Bestimmung der sog. Fundamentalmatrix, die die gesamte Information der relativen Orientierung zweier geradentreu abbildender Kameras enth\"alt. Die Fundamentalmatrix erm\"oglicht insbesondere die Formulierung eines geometrischen Kriteriums f\"ur Punkte, die sog. Koplanarit\"atsbedingung. Als zweites Kriterium f\"ur die Homologie von Punkten verwendet man ein radiometrisches Kriterium, meist die \"Ahnlichkeit der Intensit\"ats- oder Farbverteilung in der Umgebung der Punkte. Sie ist jedoch nur unter eingeschr\"ankten Bedingungen leicht zu bestimmen, nicht etwa bei partiellen Verdeckungen. Ziel der Arbeit ist nun die Bestimmung der Fundamentalmatrix als Optimierungsverfahren zu formulieren, das gleichzeitig die geometrischen und die radiometrischen Bedingungen ber\"ucksichtigt. Intensit\"atsunterschiede und Abweichungen von der Koplanarit\"atsbedingung werden durch die Formulierung von normierten chi-quadrat-verteilten Distanzma{\ss}en integriert. Dabei werden die Umgebungen homologer Punkte ma{\ss}stabs- und rotationsinvariant verglichen. Die Zuordnung wird mit einem Annealingverfahren bestimmt. Die Leistungsf\"ahigkeit und die Grenzen des Verfahrens werden anhand eines k\"unstlichen Beispiels, f\"ur das die wahren Werte der Fundamentalmatrix bekannt sind, demonstriert.},
    city = {Bonn},
    }

  • K. Borchert, H. Kokossulis, and G. Müller, “3D-Rekonstruktion aus Videosequenzen,” Diplomarbeit Master Thesis, 2002.
    [BibTeX]
    [none]
    @mastersthesis{borchert20023d,
      author   = {Borchert, Konstantin and Kokossulis, Hubert and M\"uller, Gero},
      title    = {3D-Rekonstruktion aus Videosequenzen},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {2002},
      note     = {Betreuung: Prof. Dr. Reinhard Klein, Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • W. Förstner, “Computer Vision and Photogrammetry — Mutual Questions: Geometry, Statistics and Cognition,” in Bildteknik/Image Science, Swedish Society for Photogrammetry and Remote Sensing, 2002, p. 151–164.
    [BibTeX] [PDF]

    The emerging interaction between Computer Vision and Photogrammetry certainly is well in the flavor of Kennert Torlegard’s professional life: Not only his PhD thesis dealt with un-calibrated cameras, not only was one of his main interests close range photogrammetry with all its various applications, no, he also was active in bringing the researchers of both fields together. This paper on one side collects experiences of the dialog between Computer Vision and Photogrammetry. On the other side it gives an example, closely related to Kennert Torlegards PhD thesis of a type of analysis hopefully useful for researchers from both fields, illuminating the common fields geometry and statistics and the possibilities of mutual exchange, and finally reflects on the recent developments in the area of cognitive vision and their relation to aerial image interpretation.

    @inproceedings{forstner2002computer,
      author     = {F\"orstner, Wolfgang},
      title      = {Computer Vision and Photogrammetry --- Mutual Questions: Geometry, Statistics and Cognition},
      booktitle  = {Bildteknik/Image Science, Swedish Society for Photogrammetry and Remote Sensing},
      pages      = {151--164},
      year       = {2002},
      abstract   = {The emerging interaction between Computer Vision and Photogrammetry certainly is well in the flavor of Kennert Torlegard's professional life: Not only his PhD thesis dealt with un-calibrated cameras, not only was one of his main interests close range photogrammetry with all its various applications, no, he also was active in bringing the researchers of both fields together. This paper on one side collects experiences of the dialog between Computer Vision and Photogrammetry. On the other side it gives an example, closely related to Kennert Torlegards PhD thesis of a type of analysis hopefully useful for researchers from both fields, illuminating the common fields geometry and statistics and the possibilities of mutual exchange, and finally reflects on the recent developments in the area of cognitive vision and their relation to aerial image interpretation.},
      city       = {Bonn},
      proceeding = {Bildteknik/Image Science, Swedish Society for Photogrammetry and Remote Sensing},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner2002Computer.pdf},
    }

  • W. Förstner, Mapping on Demand – A Dream, 2002.
    [BibTeX] [PDF]
    [none]
    @Misc{forstner2002mapping,
    title = {Mapping on Demand -- A Dream},
    author = {F\"orstner, Wolfgang},
    howpublished = {https://www.vernon.eu/ECVision/research\_planning/Research\_Dreams.htm},
    year = {2002},
    abstract = {[none]},
    timestamp = {2011.05.22},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2002Mapping.pdf},
    }

  • T. Läbe and M. Henze, “Automatische äussere Orientierung in der Orthophotoproduktion – ein Erfahrungsbericht,” in Proc. of DGPF Conf., Neubrandenburg, Germany, 2002, p. 245–252.
    [BibTeX] [PDF]

    Eine der notwendigen Voraussetzungen zur Erstellung eines Orthophotos ist die Kenntnis der äußeren Orientierung des zu bearbeitenden Bildes. Hierfür wurde am Institut für Photogrammetrie der Universität Bonn innerhalb eines Kooperationsprojektes mit dem Landesvermessungsamt Nordrhein-Westfalen ein vollautomatisches Verfahren entwickelt, das auf der Suche von projizierten 3D-Kanten im Bild basiert. Das Programm trägt den Namen “AMOR” (Automatische Modellgestützte ORientierung) und beinhaltet sowohl Bildverarbeitung (Kantenextraktion) als auch robuste Schätzverfahren für die Bestimmung der Orientierungselemente. Als Datenbasis zur Bestimmung der äußeren Orientierung werden anders als beim konventionellen manuellen Vorgehen keine Passpunkte sondern sogenannte “Passpunktmodelle” verwendet. Dies sind Mengen georeferenzierter 3D-Kanten, wofür sich insbesondere Gebäudekanten eines digitalen Gebäudemodells eignen. Zur fächendeckenden Orthophotoproduktion wurden in Nordrhein-Westfalen landesweit Gebäude als Passpunktmodelle erfasst. AMOR ist in den Produktionsablauf der Orthophotoherstellung beim Landesvermessungsamt integriert worden und kann aufgrund der Passpunktmodelldatenbank auf einem Großteil der Landesfläche angewendet werden. Der Aufsatz gibt einen Überblick über das Verfahren zur automatischen Orientierungsbestimmung und dessen Integration mit besonderem Schwerpunkt auf den praktischen Einsatz beim Landesvermessungsamt NRW.

    @InProceedings{labe2002automatische,
    title = {Automatische \"aussere Orientierung in der Orthophotoproduktion - ein Erfahrungsbericht},
    author = {L\"abe, Thomas and Henze, Manfred},
    booktitle = {Proc. of DGPF Conf.},
    year = {2002},
    address = {Neubrandenburg, Germany},
    pages = {245--252},
    abstract = {Eine der notwendigen Voraussetzungen zur Erstellung eines Orthophotos ist die Kenntnis der \"au{\ss}eren Orientierung des zu bearbeitenden Bildes. Hierf\"ur wurde am Institut f\"ur Photogrammetrie der Universit\"at Bonn innerhalb eines Kooperationsprojektes mit dem Landesvermessungsamt Nordrhein-Westfalen ein vollautomatisches Verfahren entwickelt, das auf der Suche von projizierten 3D-Kanten im Bild basiert. Das Programm tr\"agt den Namen "AMOR" (Automatische Modellgest\"utzte ORientierung) und beinhaltet sowohl Bildverarbeitung (Kantenextraktion) als auch robuste Sch\"atzverfahren f\"ur die Bestimmung der Orientierungselemente. Als Datenbasis zur Bestimmung der \"au{\ss}eren Orientierung werden anders als beim konventionellen manuellen Vorgehen keine Passpunkte sondern sogenannte "Passpunktmodelle" verwendet. Dies sind Mengen georeferenzierter 3D-Kanten, wof\"ur sich insbesondere Geb\"audekanten eines digitalen Geb\"audemodells eignen. Zur f\"achendeckenden Orthophotoproduktion wurden in Nordrhein-Westfalen landesweit Geb\"aude als Passpunktmodelle erfasst. AMOR ist in den Produktionsablauf der Orthophotoherstellung beim Landesvermessungsamt integriert worden und kann aufgrund der Passpunktmodelldatenbank auf einem Gro{\ss}teil der Landesfl\"ache angewendet werden. Der Aufsatz gibt einen \"Uberblick \"uber das Verfahren zur automatischen Orientierungsbestimmung und dessen Integration mit besonderem Schwerpunkt auf den praktischen Einsatz beim Landesvermessungsamt NRW.},
    city = {Bonn},
    proceeding = {Proc. of DGPF Conf.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Labe2002Automatische.pdf},
    }

  • M. Luxen and W. Förstner, “Characterizing Image Quality: Blind Estimation of the Point Spread Function from a Single Image,” in Proc. of the PCV’02 Symposium, 2002, p. A: 205.
    [BibTeX] [PDF]

    The paper describes a method for blind estimation of sharpness and resolving power from a single image. These measures can be used to characterize images in the context of the performance of image analysis procedures. The method assumes the point spread function (PSF) can be approximated by an anisotropic Gaussian. The width Sigma of the PSF is determined by the ratio Sigma_g/Sigma_g’ of the standard deviations of the intensity and of its derivative at edges. The contrast sensitivity function (CSF) is based on an optimal model for detecting straight edges between homogeneous regions in noisy images. It depends on the signal to noise ratio and is linear in the frequency. The method is applied to artificial and real images proving that it gives valuable results.

    @inproceedings{luxen2002characterizing,
      author    = {Luxen, Marc and F\"orstner, Wolfgang},
      title     = {Characterizing Image Quality: Blind Estimation of the Point Spread Function from a Single Image},
      booktitle = {Proc. of the PCV'02 Symposium},
      pages     = {A: 205},
      year      = {2002},
      abstract  = {The paper describes a method for blind estimation of sharpness and resolving power from a single image. These measures can be used to characterize images in the context of the performance of image analysis procedures. The method assumes the point spread function (PSF) can be approximated by an anisotropic Gaussian. The width Sigma of the PSF is determined by the ratio Sigma_g/Sigma_g' of the standard deviations of the intensity and of its derivative at edges. The contrast sensitivity function (CSF) is based on an optimal model for detecting straight edges between homogeneous regions in noisy images. It depends on the signal to noise ratio and is linear in the frequency. The method is applied to artificial and real images proving that it gives valuable results.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Luxen2002Characterizing.pdf},
    }

  • H. Schuster, “Bildsegmentierung mit stochastischen Bildpyramiden,” Diplomarbeit Master Thesis, 2002.
    [BibTeX]

    Die Bildsegmentierung ist ein unverzichtbarer Schritt zur Vorverarbeitung von Bildern für die Objekterkennung. Die Stochastische Bildpyramide ist ein hierarchischer Ansatz der Segmentierung, der auf einer durch einen Zufallsprozess gesteuerten, unregelmäßigen Pyramide beruht. Aufgabe des Diplomanden ist es, die theoretischen Grundlagen der stochastischen Bildpyramide aufzuarbeiten und zu untersuchen, in wieweit sich diese Struktur in Verbindung mit etablierten Segmentierungsalgorithmen zur Bildsegmentierung einsetzen läßt.

    @mastersthesis{schuster2002bildsegmentierung,
      author   = {Schuster, Hanns-Florian},
      title    = {Bildsegmentierung mit stochastischen Bildpyramiden},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {2002},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dr.-Ing. Ansgar Brunn},
      abstract = {Die Bildsegmentierung ist ein unverzichtbarer Schritt zur Vorverarbeitung von Bildern f\"ur die Objekterkennung. Die Stochastische Bildpyramide ist ein hierarchischer Ansatz der Segmentierung, der auf einer durch einen Zufallsprozess gesteuerten, unregelm\"a{\ss}igen Pyramide beruht. Aufgabe des Diplomanden ist es, die theoretischen Grundlagen der stochastischen Bildpyramide aufzuarbeiten und zu untersuchen, in wieweit sich diese Struktur in Verbindung mit etablierten Segmentierungsalgorithmen zur Bildsegmentierung einsetzen l\"a{\ss}t.},
      city     = {Bonn},
    }

  • C. Stachniss, “Zielgerichtete Kollisionsvermeidung für mobile Roboter in dynamischen Umgebungen,” Master Thesis, 2002.
    [BibTeX] [PDF]
    [none]
    @MastersThesis{stachniss2002,
    title = {{Zielgerichtete} {Kollisionsvermeidung} f{\"u}r mobile {Roboter} in dynamischen {Umgebungen}},
    author = {Stachniss, C.},
    school = {University of Freiburg, Department of Computer Science},
    year = {2002},
    note = {In German},
    abstract = {[none]},
    timestamp = {2014.04.24},
    url = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss02diplom.pdf},
    }

  • C. Stachniss and W. Burgard, “An Integrated Approach to Goal-directed Obstacle Avoidance under Dynamic Constraints for Dynamic Environments,” in Proc. of the IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), Lausanne, Switzerland, 2002, p. 508–513.
    [BibTeX] [PDF]
    [none]
    @inproceedings{stachniss2002a,
      author    = {Stachniss, C. and Burgard, W.},
      title     = {An Integrated Approach to Goal-directed Obstacle Avoidance under Dynamic Constraints for Dynamic Environments},
      booktitle = iros,
      address   = {Lausanne, Switzerland},
      pages     = {508--513},
      year      = {2002},
      abstract  = {[none]},
      timestamp = {2014.04.24},
      url       = {https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/stachniss02iros.pdf},
    }

2001

  • F. Blau and P. Germer, “Verfahren zur 3D-Rekonstruktion von Zylindern aus digitalen Bildern und dessen Beurteilung,” Diplomarbeit Master Thesis, 2001.
    [BibTeX]
    [none]
    @MastersThesis{blau2001verfahren,
    title = {Verfahren zur 3D-Rekonstruktion von Zylindern aus digitalen Bildern und dessen Beurteilung},
    author = {Blau, Fabian and Germer, Peter},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2001},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Inform. Stephan Heuel},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • W. Förstner, “Generic Estimation Procedures for Orientation with Minimum and Redundant Information,” in Calibration and Orientation of Cameras in Computer Vision, A. Gruen and T. S. Huang, Eds., Springer, 2001. doi:10.1007/978-3-662-04567-1_3
    [BibTeX] [PDF]

    Orientation of cameras with minimum or redundant information is the first step in 3D-scene analysis. The difficulty of this task lies in the lack of generic and robust procedures for geometric reasoning, calibration and especially orientation. The paper collects available tools from statistics, expecially for the diagnosis of data and design and for coping with outliers using robust estimation tecniques. It presents a generic strategy for data analysis on the contest of orientation procedures which may be extendet towards self-calibration.

    @InCollection{forstner2001generic,
    title = {Generic Estimation Procedures for Orientation with Minimum and Redundant Information},
    author = {F\"orstner, Wolfgang},
    booktitle = {Calibration and Orientation of Cameras in Computer Vision},
    publisher = {Springer},
    year = {2001},
    editor = {A. Gruen and T. S. Huang},
    number = {34},
    series = {Series in Information Sciences},
    abstract = {Orientation of cameras with minimum or redundant information is the first step in 3D-scene analysis. The difficulty of this task lies in the lack of generic and robust procedures for geometric reasoning, calibration and especially orientation. The paper collects available tools from statistics, especially for the diagnosis of data and design and for coping with outliers using robust estimation techniques. It presents a generic strategy for data analysis in the context of orientation procedures which may be extended towards self-calibration.},
    doi = {10.1007/978-3-662-04567-1_3},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2001Generic.pdf},
    }

  • W. Förstner, “Algebraic Projective Geometry and Direct Optimal Estimation of Geometric Entities,” in OeAGM 2001, 2001.
    [BibTeX] [PDF]

    The paper presents a new technique for optimal estimation for statistically uncertain geometric entites. It is an extension of the classical eigenvector solution technique but takes the full covariance information into account to arrive at a ML-estimate. The proposed solution is significantly more transparent than the solution for estimation under heteroscedasticity proposed by Leedan, Matei and Meer. We give a new representation of algebraic projective geometry easing statistical reasoning. We show how the setup can be used in object reconstruction, especially when estimating points and edges of polyhedra. We explicitely give an example for estimating 3D-points and 3D-lines from image points and image lines. The direct solutions do practically require no approximate values.

    @InProceedings{forstner2001algebraic,
    title = {Algebraic Projective Geometry and Direct Optimal Estimation of Geometric Entities},
    author = {F{\"o}rstner, Wolfgang},
    booktitle = {OeAGM 2001},
    year = {2001},
    abstract = {The paper presents a new technique for optimal estimation for statistically uncertain geometric entites. It is an extension of the classical eigenvector solution technique but takes the full covariance information into account to arrive at a ML-estimate. The proposed solution is significantly more transparent than the solution for estimation under heteroscedasticity proposed by Leedan, Matei and Meer. We give a new representation of algebraic projective geometry easing statistical reasoning. We show how the setup can be used in object reconstruction, especially when estimating points and edges of polyhedra. We explicitely give an example for estimating 3D-points and 3D-lines from image points and image lines. The direct solutions do practically require no approximate values.},
    city = {Bonn},
    proceeding = {appeared at the OeAGM 2001},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2001Algebraic.pdf},
    }

  • W. Förstner, “On Estimating 2D Points and Lines from 2D Points and Lines,” in Festschrift anläßlich des 60. Geburtstages von Prof. Dr.-Ing. Bernhard Wrobel, Technische Universität Darmstadt, 2001, p. 69 – 87.
    [BibTeX] [PDF]

    The paper presents tools for optimally estimating 3D points and lines from 2D points and lines. It uses algebraic projective geometry for representing 2D and 3D geometric entities, perspective projection and its inversion. The uncertainty of the entities can easily be integrated. The direct solutions do not require approximate values.

    @InCollection{forstner2001estimating,
    title = {On Estimating {2D} Points and Lines from {2D} Points and Lines},
    author = {F{\"o}rstner, Wolfgang},
    booktitle = {Festschrift anl\"a{\ss}lich des 60. Geburtstages von Prof. Dr.-Ing. Bernhard Wrobel},
    publisher = {Technische Universit\"at Darmstadt},
    year = {2001},
    pages = {69 -- 87},
    abstract = {The paper presents tools for optimally estimating 3D points and lines from 2D points and lines. It uses algebraic projective geometry for representing 2D and 3D geometric entities, perspective projection and its inversion. The uncertainty of the entities can easily be integrated. The direct solutions do not require approximate values.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2001Estimating.pdf},
    }

  • S. Heuel, “Points, Lines and Planes and their Optimal Estimation,” in Pattern Recognition, 23rd DAGM Symposium, München, 2001, p. 92–99. doi:10.1007/3-540-45404-7_13
    [BibTeX] [PDF]

    We present a method for estimating unknown geometric entities based on identical, incident, parallel or orthogonal observed entities. These entities can be points and lines in 2D and points, lines and planes in 3D. We don’t need any approximate values for the unknowns. The entities are represented as homogeneous vectors or matrices, which leads to an easy formulation for a linear estimation model. Applications of the estimation method are manifold, ranging from 2D corner detection to 3D grouping.

    @InProceedings{heuel2001points,
    title = {Points, Lines and Planes and their Optimal Estimation},
    author = {Heuel, Stephan},
    booktitle = {Pattern Recognition, 23rd DAGM Symposium},
    year = {2001},
    address = {M\"unchen},
    editor = {Radig, Bernd and Florczyk, Stefan},
    month = sep,
    volume = {2191},
    pages = {92--99},
    publisher = {Springer},
    series = {LNCS},
    abstract = {We present a method for estimating unknown geometric entities based on identical, incident, parallel or orthogonal observed entities. These entities can be points and lines in 2D and points, lines and planes in 3D. We don't need any approximate values for the unknowns. The entities are represented as homogeneous vectors or matrices, which leads to an easy formulation for a linear estimation model. Applications of the estimation method are manifold, ranging from 2D corner detection to 3D grouping.},
    doi = {10.1007/3-540-45404-7_13},
    postscript = {https://www.ipb.uni-bonn.de/ipb/lit/papers01/heuel01.points.ps.gz},
    url = {https://www.ipb.uni-bonn.de/pdfs/Heuel2001Points.pdf},
    }

  • S. Heuel and W. Förstner, “Matching, Reconstructing and Grouping 3D Lines From Multiple Views Using Uncertain Projective Geometry,” in CVPR ’01, 2001, p. 721. doi:10.1109/CVPR.2001.991006
    [BibTeX] [PDF]

    We present a geometric method for (i) matching 2D line segments from multiple oriented images, (ii) optimally reconstructing 3D line segments and (iii) grouping 3D line segments to corners. The proposed algorithm uses two developments in combining projective geometry and statistics, which are described in this article: (i) the geometric entities points, lines and planes in 2D and 3D and their uncertainty are represented in homogeneous coordinates and new entities may be constructed including their propagated uncertainty. The construction can be performed directly or as an estimation. (ii) relations such as incidence, equality, parallelity and orthogonality between points, lines and planes can be tested statistically based on a given significance level. Using these tools, the resulting algorithm is straight-forward and gives reasonable results. It is only based on geometric information and does not use any image intensities, though it can be extended to use other information. The matching of 3D lines does not need any thresholds other than a significance value for the hypotheses tests.

    @InProceedings{heuel2001matching,
    title = {Matching, Reconstructing and Grouping 3D Lines From Multiple Views Using Uncertain Projective Geometry},
    author = {Heuel, Stephan and F{\"o}rstner, Wolfgang},
    booktitle = {CVPR '01},
    year = {2001},
    organization = {IEEE},
    pages = {721},
    abstract = {We present a geometric method for (i) matching 2D line segments from multiple oriented images, (ii) optimally reconstructing 3D line segments and (iii) grouping 3D line segments to corners. The proposed algorithm uses two developments in combining projective geometry and statistics, which are described in this article: (i) the geometric entities points, lines and planes in 2D and 3D and their uncertainty are represented in homogeneous coordinates and new entities may be constructed including their propagated uncertainty. The construction can be performed directly or as an estimation. (ii) relations such as incidence, equality, parallelity and orthogonality between points, lines and planes can be tested statistically based on a given significance level. Using these tools, the resulting algorithm is straight-forward and gives reasonable results. It is only based on geometric information and does not use any image intensities, though it can be extended to use other information. The matching of 3D lines does not need any thresholds other than a significance value for the hypotheses tests.},
    doi = {10.1109/CVPR.2001.991006},
    postscript = {https://www.ipb.uni-bonn.de/ipb/lit/papers01/heuel01.matching.ps.gz},
    url = {https://www.ipb.uni-bonn.de/pdfs/Heuel2001Matching.pdf},
    }

  • S. Heuel and W. Förstner, “Topological and geometrical models for building extraction from multiple images,” in Automatic Extraction of Man-Made Objects from Aerial and Space Images (III), 2001.
    [BibTeX] [PDF]

    The paper discusses models for building extraction from multiple images and shows the importance of the joint use of topological relations and uncertain geometry resulting in a platform for spatial reasoning useful for the reconstruction of manmade objects. We motivate our approach based on the experience in building reconstruction and describe tools for topological and geometric reasoning under uncertainty. We use a polyhedral patch model as intermediate layer for building interpretation.

    @InProceedings{heuel2001topological,
    title = {Topological and geometrical models for building extraction from multiple images},
    author = {Heuel, Stephan and F{\"o}rstner, Wolfgang},
    booktitle = {Automatic Extraction of Man-Made Objects from Aerial and Space Images (III)},
    year = {2001},
    publisher = {Balkema Publishers},
    abstract = {The paper discusses models for building extraction from multiple images and shows the importance of the joint use of topological relations and uncertain geometry resulting in a platform for spatial reasoning useful for the reconstruction of manmade objects. We motivate our approach based on the experience in building reconstruction and describe tools for topological and geometric reasoning under uncertainty. We use a polyhedral patch model as intermediate layer for building interpretation.},
    url = {https://www.ipb.uni-bonn.de/ipb/lit/papers01/heuel01.topological.html},
    }

  • S. Heuel and T. H. Kolbe, “Building Reconstruction: The Dilemma of Generic Versus Specific Models,” KI – Zeitschrift für Künstliche Intelligenz, iss. 3, p. 57–62, 2001.
    [BibTeX] [PDF]

    Automatic building extraction from images is a particularly hard object recognition problem, because both the image data and the models to be reconstructed reveal a high complexity. Whereas models have to be generic in order to fit most of the observable different building shapes they also have to be building specific to discriminate buildings from other objects in the images. This situation describes the typical dilemma that all building recognition system have to cope with. In this article we present and discuss two approaches for automatic building reconstruction that were developed at the University of Bonn during the last eight years. It is shown how different AI methods were employed to solve the numerous problems concerning modeling, inference and uncertain reasoning, matching, and evaluation. Both approaches have been implemented and were successfully applied to real data. Due to complementary limitations, the integration of both would be desirable in order to develop a more comprehensive solution. Unfortunately, a number of open questions still have to be answered which are discussed at the end of the article.

    @Article{heuel2001building,
    title = {Building Reconstruction: The Dilemma of Generic Versus Specific Models},
    author = {Heuel, Stephan and Kolbe, T. H.},
    journal = {KI - Zeitschrift f\"ur K\"unstliche Intelligenz},
    year = {2001},
    month = jul,
    number = {3},
    pages = {57--62},
    abstract = {Automatic building extraction from images is a particularly hard object recognition problem, because both the image data and the models to be reconstructed reveal a high complexity. Whereas models have to be generic in order to fit most of the observable different building shapes they also have to be building specific to discriminate buildings from other objects in the images. This situation describes the typical dilemma that all building recognition system have to cope with. In this article we present and discuss two approaches for automatic building reconstruction that were developed at the University of Bonn during the last eight years. It is shown how different AI methods were employed to solve the numerous problems concerning modeling, inference and uncertain reasoning, matching, and evaluation. Both approaches have been implemented and were successfully applied to real data. Due to complementary limitations, the integration of both would be desirable in order to develop a more comprehensive solution. Unfortunately, a number of open questions still have to be answered which are discussed at the end of the article.},
    url = {https://www.ipb.uni-bonn.de/papers/2001/heuel01.building.html},
    }

  • K. Kühnhenrich, “Wavelet-Transformation zur Repräsentation von diskreten Signalen,” Diplomarbeit Master Thesis, 2001.
    [BibTeX]
    [none]
    @MastersThesis{kuhnhenrich2001wavelet,
    title = {Wavelet-Transformation zur Repr\"asentation von diskreten Signalen},
    author = {K{\"u}hnhenrich, Karin},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2001},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F{\"o}rstner, Dr.-Ing. Ansgar Brunn},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • M. Luxen and W. Förstner, “Optimal Camera Orientation from Points and Straight Lines,” in Proc. of the DAGM 2001, München, 2001, p. 84–91. doi:10.1007/3-540-45404-7_12
    [BibTeX] [PDF]

    The paper presents an optimal estimate for the projection matrix for points of a camera from an arbitrary mixture of six or more observed points and straight lines in object space. It gives expressions for determining the corresponding projection matrix for straight lines together with its covariance matrix. Examples on synthetic and real images demonstrate the feasibility of the approach.

    @InProceedings{luxen2001optimal,
    title = {Optimal Camera Orientation from Points and Straight Lines},
    author = {Luxen, Marc and F{\"o}rstner, Wolfgang},
    booktitle = {Proc. of the DAGM 2001},
    year = {2001},
    address = {M\"unchen},
    editor = {Radig, Bernd and Florczyk, Stefan},
    pages = {84--91},
    abstract = {The paper presents an optimal estimate for the projection matrix for points of a camera from an arbitrary mixture of six or more observed points and straight lines in object space. It gives expressions for determining the corresponding projection matrix for straight lines together with its covariance matrix. Examples on synthetic and real images demonstrate the feasibility of the approach.},
    city = {Bonn},
    doi = {10.1007/3-540-45404-7_12},
    proceeding = {Proc. of the DAGM 2001},
    url = {https://www.ipb.uni-bonn.de/pdfs/Luxen2001Optimal.pdf},
    }

  • K. Wolff and W. Förstner, “Efficiency of Feature Matching for Single- and Multi-Media Geometry using Multiple View Relations,” in Optical 3-D Measurement Techniques V, Vienna, Austria, 2001.
    [BibTeX]

    For optical 3D reconstruction, specially for real-time image sequence calculations, highly efficient algorithms are required. We discuss two aspects of increasing the efficiency of a matching algorithm based on feature points. The first aspect is the efficiency of checking the consistency of matching candidates using epipolar and trifocal constraints. The second aspect, namely the possibility of approximating the non projective mapping of multi-media geometry by a projective one, which leads to virtual cameras, is investigated in the main part of the paper. Exploiting the simplicity of algebraic expressions using normalized projective cameras we significantly increase the efficiency of the geometric computation during multiple image matching.

    @InProceedings{wolff2001efficiency,
    title = {Efficiency of Feature Matching for Single- and Multi-Media Geometry using Multiple View Relations},
    author = {Wolff, Kirsten and F{\"o}rstner, Wolfgang},
    booktitle = {Optical 3-D Measurement Techniques V},
    year = {2001},
    address = {Vienna, Austria},
    editor = {Gruen, A. and Kahmen, Heribert},
    abstract = {For optical 3D reconstruction, specially for real-time image sequence calculations, highly efficient algorithms are required. We discuss two aspects of increasing the efficiency of a matching algorithm based on feature points. The first aspect is the efficiency of checking the consistency of matching candidates using epipolar and trifocal constraints. The second aspect, namely the possibility of approximating the non projective mapping of multi-media geometry by a projective one, which leads to virtual cameras, is investigated in the main part of the paper. Exploiting the simplicity of algebraic expressions using normalized projective cameras we significantly increase the efficiency of the geometric computation during multiple image matching.},
    }

2000

  • C. Boxhammer, “Effizienz der Methode der konjugierten Gradienten bei der Rekonstruktion von Oberflächen aus digitalen Bildern,” Diplomarbeit Master Thesis, 2000.
    [BibTeX]
    [none]
    @mastersthesis{boxhammer2000effizienz,
    author = {Boxhammer, Christian},
    title = {Effizienz der Methode der konjugierten Gradienten bei der Rekonstruktion von Oberfl\"achen aus digitalen Bildern},
    school = {Institute of Photogrammetry, University of Bonn},
    type = {Diplomarbeit},
    year = {2000},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dr.-Ing. Jochen Meidow},
    abstract = {[none]},
    city = {Bonn},
    }

  • A. Brunn, “Analyse von Laserscannerdaten zur Gebäuderekonstruktion,” Zeitschrift für Photogrammetrie, Fernerkundung und Geoinformationssysteme, iss. 3, p. 189–190, 2000.
    [BibTeX] [PDF]

    The paper discusses models for building extraction from multiple images and shows the importance of the joint use of topological relations and uncertain geometry resulting in a platform for spatial reasoning useful for the reconstruction of manmade objects. We motivate our approach based on the experience in building reconstruction and describe tools for topological and geometric reasoning under uncertainty. We use a polyhedral patch model as intermediate layer for building interpretation.

    @Article{brunn2000analyse,
    title = {Analyse von Laserscannerdaten zur Geb\"auderekonstruktion},
    author = {Brunn, Ansgar},
    journal = {Zeitschrift f\"ur Photogrammetrie, Fernerkundung und Geoinformationssysteme},
    year = {2000},
    number = {3},
    pages = {189--190},
    abstract = {The paper discusses models for building extraction from multiple images and shows the importance of the joint use of topological relations and uncertain geometry resulting in a platform for spatial reasoning useful for the reconstruction of manmade objects. We motivate our approach based on the experience in building reconstruction and describe tools for topological and geometric reasoning under uncertainty. We use a polyhedral patch model as intermediate layer for building interpretation.},
    internal-note = {NOTE(review): abstract is identical to heuel2001topological and does not match this German PFG note on laser scanner data -- verify against the published source},
    url = {https://www.schweizerbart.de/j/pfg/D-pfg.html},
    }

  • A. Brunn, “Semantik-basierte Gebäudeerfassung mit verkoppelten Markoff-Zufallsfeldern,” PhD Thesis, 2000.
    [BibTeX] [PDF]

    \textbf{Summary} The thesis develops a new automatic algorithm for the acquisition of buildings from digital surface models. In contrast to most of the already published techniques, the new algorithm closely combines the reconstruction with the interpretation. We choose a building representation based on its topology, which enables a combined evalation of different sensor types. We use CW-complexes for the topological building representation. The elements of the CW-complexes, the cells, are classified locally. We take neighborhood relations into account by conditional probabilities to improve the classification result. Therefore we define a neighborhood system on the CW-complex and build a Markov-Random-Field. We automatically learn building models from representative examples. The buildings are reconstructed locally using the classification result. By calculation of various building reconstructions using distinct building modells, a classification of the type of the complete building is done. Establishing a reduced building model, we dicuss synthetic and real examples to show the capabilities of the new algorithm. The importance of the approach is shown by the examples. \textbf{Zusammenfassung} In dieser Arbeit wird ein neues automatisches Verfahren zur Erfassung von Gebäuden aus Digitalen Oberflächenmodellen entwickelt. Im Gegensatz zu den meisten bisher in der Literatur beschriebenen Verfahren wird in diesem Ansatz die Rekonstruktion der Gebäude eng mit der Interpretation verknüpft. Es wird ein topologischer Ansatz zur Gebäuderekonstruktion gewählt, der die integrierte Auswertung von unterschiedlichen Datenquellen ermöglicht. Die Topologie eines Objektes lässt sich durch einen CW-Komplex repräsentieren. In dieser Arbeit werden CW-Komplexe zur Repräsentation der Topologie der Gebäudeoberfläche genutzt. Die Bestandteile des CW-Komplexes, die Zellen (Punkte, Kanten und Flächen), werden lokal klassifiziert. 
Zur Verbesserung der lokalen Klassifikationen werden Nachbarschaftsbedingungen mittels bedingter Wahrscheinlichkeiten berücksichtigt. Dazu wird eine Nachbarschaftsordnung auf dem CW-Komplex definiert und ein verkoppeltes Markoff-Zufallsfeld formuliert. Wir lernen die Gebäudemodelle automatisch aus repräsentativen Interpretationen. Die Gebäude werden lokal -aufgrund der Klassifikation der Zellen – mittels robuster Schätzverfahren rekonstruiert. Die Gebäudetypen der rekonstruierten Gebäude werden durch das Markoff-Zufallsfeld klassifiziert. Zur Veranschaulichung des Potentials und Beurteilung des Verfahrens werden synthetische und reale Beispiele in einem verallgemeinerten geometrischem Modellraum angeführt und diskutiert.

    @PhDThesis{brunn2000semantik,
    title = {Semantik-basierte Geb\"audeerfassung mit verkoppelten Markoff-Zufallsfeldern},
    author = {Brunn, Ansgar},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2000},
    abstract = {\textbf{Summary} The thesis develops a new automatic algorithm for the acquisition of buildings from digital surface models. In contrast to most of the already published techniques, the new algorithm closely combines the reconstruction with the interpretation. We choose a building representation based on its topology, which enables a combined evalation of different sensor types. We use CW-complexes for the topological building representation. The elements of the CW-complexes, the cells, are classified locally. We take neighborhood relations into account by conditional probabilities to improve the classification result. Therefore we define a neighborhood system on the CW-complex and build a Markov-Random-Field. We automatically learn building models from representative examples. The buildings are reconstructed locally using the classification result. By calculation of various building reconstructions using distinct building modells, a classification of the type of the complete building is done. Establishing a reduced building model, we dicuss synthetic and real examples to show the capabilities of the new algorithm. The importance of the approach is shown by the examples. \textbf{Zusammenfassung} In dieser Arbeit wird ein neues automatisches Verfahren zur Erfassung von Geb\"auden aus Digitalen Oberfl\"achenmodellen entwickelt. Im Gegensatz zu den meisten bisher in der Literatur beschriebenen Verfahren wird in diesem Ansatz die Rekonstruktion der Geb\"aude eng mit der Interpretation verkn\"upft. Es wird ein topologischer Ansatz zur Geb\"auderekonstruktion gew\"ahlt, der die integrierte Auswertung von unterschiedlichen Datenquellen erm\"oglicht. Die Topologie eines Objektes l\"asst sich durch einen CW-Komplex repr\"asentieren. In dieser Arbeit werden CW-Komplexe zur Repr\"asentation der Topologie der Geb\"audeoberfl\"ache genutzt. Die Bestandteile des CW-Komplexes, die Zellen (Punkte, Kanten und Fl\"achen), werden lokal klassifiziert. 
Zur Verbesserung der lokalen Klassifikationen werden Nachbarschaftsbedingungen mittels bedingter Wahrscheinlichkeiten ber\"ucksichtigt. Dazu wird eine Nachbarschaftsordnung auf dem CW-Komplex definiert und ein verkoppeltes Markoff-Zufallsfeld formuliert. Wir lernen die Geb\"audemodelle automatisch aus repr\"asentativen Interpretationen. Die Geb\"aude werden lokal -aufgrund der Klassifikation der Zellen - mittels robuster Sch\"atzverfahren rekonstruiert. Die Geb\"audetypen der rekonstruierten Geb\"aude werden durch das Markoff-Zufallsfeld klassifiziert. Zur Veranschaulichung des Potentials und Beurteilung des Verfahrens werden synthetische und reale Beispiele in einem verallgemeinerten geometrischem Modellraum angef\"uhrt und diskutiert.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Brunn2000Semantik.pdf},
    }

  • A. Brunn, “A step towards semantic-based Building Reconstruction Using Markov-Random-Fields,” in ISPRS Congress, Amsterdam, Amsterdam, 2000, p. 117–124, 3A.
    [BibTeX] [PDF]

    In this paper we describe a new concept for the reconstruction of buildings. In contrast to most of the published approaches, we link the reconstruction process with the building interpretation. With this linkage we want to enhance the reconstruction result and to yield semantic information about the buildings. We introduce building models based on their topology. We also may use data from different sensor types. The analysis is done locally using statistical building information for the interpretation in a Markov-Random-Field and using e. g. geometric or radiometric “appearance” models for the reconstruction. A real data example from laserscanner observations demonstrates the approach.

    @inproceedings{brunn2000step,
    author = {Brunn, Ansgar},
    title = {A step towards semantic-based Building Reconstruction Using Markov-Random-Fields},
    booktitle = {ISPRS Congress, Amsterdam},
    address = {Amsterdam},
    pages = {117--124, 3A},
    year = {2000},
    abstract = {In this paper we describe a new concept for the reconstruction of buildings. In contrast to most of the published approaches, we link the reconstruction process with the building interpretation. With this linkage we want to enhance the reconstruction result and to yield semantic information about the buildings. We introduce building models based on their topology. We also may use data from different sensor types. The analysis is done locally using statistical building information for the interpretation in a Markov-Random-Field and using e. g. geometric or radiometric "appearance" models for the reconstruction. A real data example from laserscanner observations demonstrates the approach.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Brunn2000step.pdf},
    }

  • W. Förstner, “Optimally Reconstructing the Geometry of Image Triplets,” in Computer Vision – ECCV 2000, 2000, p. 669–684. doi:10.1007/3-540-45053-X_43
    [BibTeX] [PDF]

    Optimally reconstructing the geometry of image triplets from point correspondences requires a proper weighting or selection of the used constraints between observed coordinates and unknown parameters. By analysing the ML-estimation process the paper solves a set of yet unsolved problems: (1) The minimal set of four linearly independent trilinearities (Shashua 1995, Hartley 1995) actually imposes only three constraints onto the geometry of the image triplet. The seeming contradiction between the number of used constraints, three vs. four, can be explained naturally using the normal equations. (2) Direct application of such an estimation suggests a pseudoinverse of a 4×4-matrix having rank 3 which contains the covariance matrix of the homologous image points to be the optimal weight matrix. (3) Instead of using this singular weight matrix one could select three linearly dependent constraints. This is discussed for the two classical cases of forward and lateral motion, and clarifies the algebraic analysis of dependencies between trilinear constraints by Faugeras 1995. Results of an image sequence with 800 images and an Euclidean parametrization of the trifocal tensor demonstrate the feasibility of the approach.

    @InProceedings{forstner2000optimally,
    title = {Optimally Reconstructing the Geometry of Image Triplets},
    author = {F{\"o}rstner, Wolfgang},
    booktitle = {Computer Vision - ECCV 2000},
    year = {2000},
    editor = {Vernon, David},
    pages = {669--684},
    abstract = {Optimally reconstructing the geometry of image triplets from point correspondences requires a proper weighting or selection of the used constraints between observed coordinates and unknown parameters. By analysing the ML-estimation process the paper solves a set of yet unsolved problems: (1) The minimal set of four linearly independent trilinearities (Shashua 1995, Hartley 1995) actually imposes only three constraints onto the geometry of the image triplet. The seeming contradiction between the number of used constraints, three vs. four, can be explained naturally using the normal equations. (2) Direct application of such an estimation suggests a pseudoinverse of a 4x4-matrix having rank 3 which contains the covariance matrix of the homologous image points to be the optimal weight matrix. (3) Instead of using this singular weight matrix one could select three linearly dependent constraints. This is discussed for the two classical cases of forward and lateral motion, and clarifies the algebraic analysis of dependencies between trilinear constraints by Faugeras 1995. Results of an image sequence with 800 images and an Euclidean parametrization of the trifocal tensor demonstrate the feasibility of the approach.},
    city = {Bonn},
    doi = {10.1007/3-540-45053-X_43},
    proceeding = {Appeared in: Computer Vision - ECCV 2000},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2000Optimally.pdf},
    }

  • W. Förstner, “Moderne Orientierungsverfahren,” Photogrammetrie, Fernerkundung, Geoinformation (PFG), vol. 3, pp. 163-176, 2000.
    [BibTeX] [PDF]
    [none]
    @Article{forstner2000:moderne,
    title = {Moderne Orientierungsverfahren},
    author = {F{\"o}rstner, W.},
    journal = {Photogrammetrie, Fernerkundung, Geoinformation (PFG)},
    year = {2000},
    pages = {163--176},
    volume = {3},
    abstract = {[none]},
    timestamp = {2014.01.23},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2000Moderne.pdf},
    }

  • W. Förstner, “Image Preprocessing for Feature Extraction in Digital Intensity, Color and Range Images,” in Geomatic Methods for the Analysis of Data in Earth Sciences, Springer, 2000, vol. 95/2000, p. 165–189. doi:10.1007/3-540-45597-3_4
    [BibTeX] [PDF]

    The paper discusses preprocessing for feature extraction in digital intensity, color and range images. Starting from a noise model, we develop estimates for a signal dependent noise variance function and a method to transform the image, to achieve an image with signal independent noise. Establishing significance tests and the fusion of different channels for extracting linear features is shown to be simplified.

    @InCollection{forstner2000image,
    title = {Image Preprocessing for Feature Extraction in Digital Intensity, Color and Range Images},
    author = {F{\"o}rstner, Wolfgang},
    booktitle = {Geomatic Methods for the Analysis of Data in Earth Sciences},
    publisher = {Springer},
    year = {2000},
    pages = {165--189},
    series = {Lecture Notes in Earth Sciences},
    volume = {95/2000},
    abstract = {The paper discusses preprocessing for feature extraction in digital intensity, color and range images. Starting from a noise model, we develop estimates for a signal dependent noise variance function and a method to transform the image, to achieve an image with signal independent noise. Establishing significance tests and the fusion of different channels for extracting linear features is shown to be simplified.},
    doi = {10.1007/3-540-45597-3_4},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2000Image.pdf},
    }

  • W. Förstner, “New Orientation Procedures,” in Proc. of the 19th ISPRS Congress, Amsterdam, 2000, p. 297–304, 3A.
    [BibTeX] [PDF]

    Orientation procedures are preceived as the central part of photogrammetry. During the last decade the problem of determining the interior and the exterior orientation of one or more cameras has found high attraction in Computer Vision. The problem was formulated newly within a projective framework for several reasons: (1) often, the calibration of the cameras in use was not known, nor could be determined; (2) often, no approximate values for the orientation and calibration parameters were available; (3) often, self-calibration turned out to be instable, especially in case of image sequences or of variable focal length; (4) special boundary conditions, such as planar objects or the coplanarity of the projection centres allowed orientation and calibration with less corresponding points; (5) generating new views from given ones turned out ot be possible without calibration; (6) using more than two cameras with the same interior orientation was proven to allow selfcalibration, after projective reconstruction; (7) the epipolar constraint for image pairs turned out to be not sufficient for image triplets in practically relevant cases; last but not least: (8) orientation procedures were not documented for non-photogrammetrists in photogrammetric literature. A set of new orientation and calibration procedures has evolved. The imaging process is described in a projective framework (SEMPLE & KNEEBONE 1952), explicitely interpreting the 11 parameters of the direct linear transformation, being the basis for a direct determination of the 6 parameters of the exterior and 5 parameters of the interior orientation. These 5 parameters guarantee the projection to map straight lines into straight lines. Cameras with some of these 5 parameters unknown are called uncalibrated. 
The relative orientation of two cameras with unknown calibration can be achieved by a direct solution from corresponding points, leading to the fundamental matrix F, having 7 degrees of freedom, establishing the coplanarity or epipolar constraint as matching constraint, and which can be used to determine the two principal distances. Restriction to calibrated cameras, F reduces to the essential matrix E with 5 degrees of freedom, already known in photogrammetry. The relative orientation of three cameras with unknown calibration can also be achieved by a direct solution, in this case from corresponding points and lines, leading to the trifocal tensor T, having 18 degrees of freedom. It establishes matching constraints for points and straight lines, and can be used to determine a part of the calibration parameters of the three cameras. Restriction to calibrated cameras reduces to a metrical parametrization of the trifocal tensor, with 11 degrees of freedom, combining relative orientation of the first two cameras and spatial resection of the third. The paper presents solutions to these problems useful for photogrammetric applications.

    @InProceedings{forstner2000new,
    title = {New Orientation Procedures},
    author = {F\"orstner, Wolfgang},
    booktitle = {Proc. of the 19th ISPRS Congress},
    year = {2000},
    address = {Amsterdam},
    pages = {297--304, 3A},
    abstract = {Orientation procedures are perceived as the central part of photogrammetry. During the last decade the problem of determining the interior and the exterior orientation of one or more cameras has found high attraction in Computer Vision. The problem was formulated newly within a projective framework for several reasons: (1) often, the calibration of the cameras in use was not known, nor could be determined; (2) often, no approximate values for the orientation and calibration parameters were available; (3) often, self-calibration turned out to be instable, especially in case of image sequences or of variable focal length; (4) special boundary conditions, such as planar objects or the coplanarity of the projection centres allowed orientation and calibration with less corresponding points; (5) generating new views from given ones turned out to be possible without calibration; (6) using more than two cameras with the same interior orientation was proven to allow selfcalibration, after projective reconstruction; (7) the epipolar constraint for image pairs turned out to be not sufficient for image triplets in practically relevant cases; last but not least: (8) orientation procedures were not documented for non-photogrammetrists in photogrammetric literature. A set of new orientation and calibration procedures has evolved. The imaging process is described in a projective framework (SEMPLE \& KNEEBONE 1952), explicitly interpreting the 11 parameters of the direct linear transformation, being the basis for a direct determination of the 6 parameters of the exterior and 5 parameters of the interior orientation. These 5 parameters guarantee the projection to map straight lines into straight lines. Cameras with some of these 5 parameters unknown are called uncalibrated.
The relative orientation of two cameras with unknown calibration can be achieved by a direct solution from corresponding points, leading to the fundamental matrix F, having 7 degrees of freedom, establishing the coplanarity or epipolar constraint as matching constraint, and which can be used to determine the two principal distances. Restriction to calibrated cameras, F reduces to the essential matrix E with 5 degrees of freedom, already known in photogrammetry. The relative orientation of three cameras with unknown calibration can also be achieved by a direct solution, in this case from corresponding points and lines, leading to the trifocal tensor T, having 18 degrees of freedom. It establishes matching constraints for points and straight lines, and can be used to determine a part of the calibration parameters of the three cameras. Restriction to calibrated cameras reduces to a metrical parametrization of the trifocal tensor, with 11 degrees of freedom, combining relative orientation of the first two cameras and spatial resection of the third. The paper presents solutions to these problems useful for photogrammetric applications.},
    city = {Bonn},
    proceeding = {Appeared at the Proc. of the 19th ISPRS Congress},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2000New.pdf},
    }

  • W. Förstner, A. Brunn, and S. Heuel, “Statistically Testing Uncertain Geometric Relations,” in Mustererkennung 2000, Kiel, 2000, p. 17–26. doi:10.1007/978-3-642-59802-9_3
    [BibTeX] [PDF]

    This paper integrates statistical reasoning and Grassmann-Cayley algebra for making 2D and 3D geometric reasoning practical. The multi-linearity of the forms allows rigorous error propagation and statistical testing of geometric relations. This is achieved by representing all objects in homogeneous coordinates and expressing all relations using standard matrix calculus.

    @InProceedings{forstner2000statistically,
    title = {Statistically Testing Uncertain Geometric Relations},
    author = {F\"orstner, Wolfgang and Brunn, Ansgar and Heuel, Stephan},
    booktitle = {Mustererkennung 2000},
    year = {2000},
    address = {Kiel},
    editor = {Sommer, G. and Kr\"uger, N. and Perwass, Ch.},
    month = sep,
    organization = {DAGM},
    pages = {17--26},
    publisher = {Springer},
    abstract = {This paper integrates statistical reasoning and Grassmann-Cayley algebra for making 2D and 3D geometric reasoning practical. The multi-linearity of the forms allows rigorous error propagation and statistical testing of geometric relations. This is achieved by representing all objects in homogeneous coordinates and expressing all relations using standard matrix calculus.},
    doi = {10.1007/978-3-642-59802-9_3},
    postscript = {https://www.ipb.uni-bonn.de/papers/2000/foerstner00.testing.ps.gz},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner2000Statistically.pdf},
    }

  • W. Förstner and K. Wolff, “Exploiting the Multi View Geometry for Automatic Surfaces Reconstruction Using Feature Based Matching in Multi Media Photogrammetry,” in Proc. of the 19th ISPRS Congress, Amsterdam, 2000, p. 900–907, 5B.
    [BibTeX] [PDF]

    In this paper we present a new method of a feature based matching algorithm for a 3D surface reconstruction exploiting the multiview geometry. The matching algorithm conceptually allows parallel processing treating all images equally. Especially the geometry of the image triplet is used, namely the trilinear relations between image features using the trifocal tensor. The method is transferred to multi media photogrammetry. The determination of the 3D point uses a direct method minimizing the algebraic error.

    @inproceedings{forstner2000exploiting,
      author     = {F\"orstner, Wolfgang and Wolff, Kirsten},
      title      = {Exploiting the Multi View Geometry for Automatic Surfaces Reconstruction Using Feature Based Matching in Multi Media Photogrammetry},
      booktitle  = {Proc. of the 19th ISPRS Congress},
      year       = {2000},
      address    = {Amsterdam},
      pages      = {900--907, 5B},
      abstract   = {In this paper we present a new method of a feature based matching algorithm for a 3D surface reconstruction exploiting the multiview geometry. The matching algorithm conceptually allows parallel processing treating all images equally. Especially the geometry of the image triplet is used, namely the trilinear relations between image features using the trifocal tensor. The method is transferred to multi media photogrammetry. The determination of the 3D point uses a direct method minimizing the algebraic error.},
      city       = {Bonn},
      proceeding = {Appeared at the Proc. of the 19th ISPRS Congress},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner2000Exploiting.pdf},
    }

  • A. Faber and W. Förstner, “Detection of dominant orthogonal road structures in small scale,” in Proc. of the 19th ISPRS Congress, Amsterdam, 2000, p. 274–281, 3A.
    [BibTeX] [PDF]

    The objective of the presented work is the automatic segmentation of urban areas from high resolution satellite images, such as MOMS-02 images or from aerial images taken from high altitude flights. The structure of urban areas, as seen from satellites or aeroplanes, is mainly characterized by three elements: the road network, the morphology of the built up areas and the distribution of the vegetation. There exist many types of road structures in large cities, which govern the local topology and geometry of the individual roads. Typical examples are orthogonal networks, star type networks or irregular networks. Seen world wide, orthogonal networks appear to be the most common ones, as e. g. to be found in Mannheim, Barcelona, New York or Canberra. The paper presents an approach for segmentation of dominant orthogonal road structures from high resolution satellite images, like MOMS-02, or aerial images.

    @inproceedings{faber2000detection,
      author     = {Faber, Anette and F\"orstner, Wolfgang},
      title      = {Detection of dominant orthogonal road structures in small scale},
      booktitle  = {Proc. of the 19th ISPRS Congress},
      year       = {2000},
      address    = {Amsterdam},
      pages      = {274--281, 3A},
      abstract   = {The objective of the presented work is the automatic segmentation of urban areas from high resolution satellite images, such as MOMS-02 images or from aerial images taken from high altitude flights. The structure of urban areas, as seen from satellites or aeroplanes, is mainly characterized by three elements: the road network, the morphology of the built up areas and the distribution of the vegetation. There exist many types of road structures in large cities, which govern the local topology and geometry of the individual roads. Typical examples are orthogonal networks, star type networks or irregular networks. Seen world wide, orthogonal networks appear to be the most common ones, as e. g. to be found in Mannheim, Barcelona, New York or Canberra. The paper presents an approach for segmentation of dominant orthogonal road structures from high resolution satellite images, like MOMS-02, or aerial images.},
      city       = {Bonn},
      proceeding = {Appeared at the Proc. of the 19th ISPRS Congress},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Faber2000Detection.pdf},
    }

  • P. Faber, “Image-based passenger detection and localization inside vehicles,” in Proc. of the 19th ISPRS Congress, 2000, p. 230–238, 5A.
    [BibTeX] [PDF]

    In our paper we describe the ongoing research to develop an intelligent airbag system. Using image sequences acquired from a stereo camera, we detect the form and the position of the driver and passenger seat. And, if a seat is classified as occupied, we try to estimate the geometry and position of the human’s head as the most distinguishing feature of the body, if it is possible. The developed software system consists of five steps: the correction of distortions followed by an epipolar rectification of the stereo images, the feature extraction, the feature-based matching, the seat occupation detection and verification, and the approximation of the human’s head. The emphasis in this paper is on the used model to approximate the human’s head by an ellipsoid. The base of a subset of the estimated features as well as a certain assignment of the features to a human’s head. The used model bases on the least square method with a condition, which supports the approximation of an ellipsoid. To determine, if an obtained approximation is valid the result is compared with the standard dimensions of a human’s head. On tests on numerous image sequences recorded inside different vehicles the feasibility of the system is shown. The information about the seat occupation and the location of the detected passengers inside the vehicle can be used to control an intelligent airbag deployment.

    @inproceedings{faber2000image,
      author     = {Faber, Petko},
      title      = {Image-based passenger detection and localization inside vehicles},
      booktitle  = {Proc. of the 19th ISPRS Congress},
      year       = {2000},
      pages      = {230--238, 5A},
      abstract   = {In our paper we describe the ongoing research to develop an intelligent airbag system. Using image sequences acquired from a stereo camera, we detect the form and the position of the driver and passenger seat. And, if a seat is classified as occupied, we try to estimate the geometry and position of the human's head as the most distinguishing feature of the body, if it is possible. The developed software system consists of five steps: the correction of distortions followed by an epipolar rectification of the stereo images, the feature extraction, the feature-based matching, the seat occupation detection and verification, and the approximation of the human's head. The emphasis in this paper is on the used model to approximate the human's head by an ellipsoid. The base of a subset of the estimated features as well as a certain assignment of the features to a human's head. The used model bases on the least square method with a condition, which supports the approximation of an ellipsoid. To determine, if an obtained approximation is valid the result is compared with the standard dimensions of a human's head. On tests on numerous image sequences recorded inside different vehicles the feasibility of the system is shown. The information about the seat occupation and the location of the detected passengers inside the vehicle can be used to control an intelligent airbag deployment.},
      city       = {Bonn},
      proceeding = {Appeared at the Proc. of the 19th ISPRS Congress},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Faber2000Image.pdf},
    }

  • P. Faber, “Seat Occupation Detection Inside Vehicles,” in IEEE Southwest Symposium on Image Analysis and Interpretation, Texas, USA, 2000, p. 187–191. doi:10.1109/IAI.2000.839597
    [BibTeX]

    In our paper we address to the problem of robust seat occupation detection inside vehicles. The used approach consists of four steps: correction of distortions followed by an epipolar rectification of the stereo images, feature extraction, feature-based matching, and the seat occupation detection and verification. The focus in this paper is on the verification of the seat occupation. The step of verification corresponds to a classification of the driver and the passenger seat as occupied or empty. First, we try to estimate the seat geometry and localization. Implicitly it can be deduced from the results, that if a seat can be modeled adapted to the data, the seat is empty. Otherwise we can assume that the seat is occupied by an object. Then, we try to differ between an occupation by a human, or any other object. On tests on numerous image sequences recorded inside different vehicles the feasibility of the approach is shown.

    @InProceedings{faber2000seat,
    title = {Seat Occupation Detection Inside Vehicles},
    author = {Faber, Petko},
    booktitle = {IEEE Southwest Symposium on Image Analysis and Interpretation},
    year = {2000},
    address = {Texas, USA},
    pages = {187--191},
    abstract = {In our paper we address to the problem of robust seat occupation detection inside vehicles. The used approach consists of four steps: correction of distortions followed by an epipolar rectification of the stereo images, feature extraction, feature-based matching, and the seat occupation detection and verification. The focus in this paper is on the verification of the seat occupation. The step of verification corresponds to a classification of the driver and the passenger seat as occupied or empty. First, we try to estimate the seat geometry and localization. Implicitly it can be deduced from the results, that if a seat can be modeled adapted to the data, the seat is empty. Otherwise we can assume that the seat is occupied by an object. Then, we try to differ between an occupation by a human, or any other object. On tests on numerous image sequences recorded inside different vehicles the feasibility of the approach is shown.},
    doi = {10.1109/IAI.2000.839597},
    }

  • B. Graeff, “Mehrbildzuordnung von projektiv verzerrten Bildern für die Qualitätsbewertung von Bildanalyseprozessen,” Diplomarbeit Master Thesis, 2000.
    [BibTeX]
    [none]
    @MastersThesis{graeff2000mehrbildzuordnung,
    title = {Mehrbildzuordnung von projektiv verzerrten Bildern f\"ur die Qualit\"atsbewertung von Bildanalyseprozessen},
    author = {Graeff, Bastian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {2000},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • S. Heuel, W. Förstner, and F. Lang, “Topological and Geometrical Reasoning in 3D Grouping for Reconstructing Polyhedral Surfaces,” in Proc. of the 19th ISPRS Congress, Amsterdam, 2000, p. 397–404, 3A.
    [BibTeX] [PDF]

    We are developing a system for reconstructing polyhedral surfaces from multiple images. This process can take advantage of the topological relations of the observed image features triggering and therefore speeding up the grouping of features to polyhedral surfaces. Exploiting the statistical properties of features when grouping them leads to consistent decisions being invariant to numbering and choice of coordinate system and allows statistical testing. This simplifies the choice of thresholds to the definition of a scene independent significance level. We describe the topological and statistical models used within our system. Experiments with synthetic and real data prove the feasibility of the approach.

    @InProceedings{heuel2000topological,
    title = {Topological and Geometrical Reasoning in 3D Grouping for Reconstructing Polyhedral Surfaces},
    author = {Heuel, Stephan and F\"orstner, Wolfgang and Lang, Felicitas},
    booktitle = {Proc. of the 19th ISPRS Congress},
    year = {2000},
    address = {Amsterdam},
    pages = {397--404, 3A},
    abstract = {We are developing a system for reconstructing polyhedral surfaces from multiple images. This process can take advantage of the topological relations of the observed image features triggering and therefore speeding up the grouping of features to polyhedral surfaces. Exploiting the statistical properties of features when grouping them leads to consistent decisions being invariant to numbering and choice of coordinate system and allows statistical testing. This simplifies the choice of thresholds to the definition of a scene independent significance level. We describe the topological and statistical models used within our system. Experiments with synthetic and real data prove the feasibility of the approach.},
    city = {Bonn},
    proceeding = {Appeared at the Proc. of the 19th ISPRS Congress},
    url = {https://www.ipb.uni-bonn.de/pdfs/Heuel2000Topological.pdf},
    }

  • M. Luxen, “Der EM-Algorithmus bei Schätz- und Klassifikationsverfahren,” Diplomarbeit Master Thesis, 2000.
    [BibTeX] [PDF]
    [none]
    @mastersthesis{luxen2000der,
      author   = {Luxen, Marc},
      title    = {Der EM-Algorithmus bei Sch\"atz- und Klassifikationsverfahren},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2000},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Ansgar Brunn},
      abstract = {[none]},
      city     = {Bonn},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Luxen2000EM-Algorithmus.pdf},
    }

  • L. Ragia, “A Quality Model for Spatial Objects,” in Proc. of the 19th ISPRS Congress, Amsterdam, 2000, p. 855–862, 4C.
    [BibTeX] [PDF]

    This paper presents a concept for analysing the quality of 2 1/2 – D spatial objects. The developed method is based on the evaluation of specific quality parameters. These parameters are determined by a topological and geometrical analysis. The quality parameters are classified into three categories: green=accepted, yellow=uncertain, red=rejected, depending on the specifications. We give confidence regions for all quality parameters, especially for completeness, false alarm rate and detection rate. The feasibility of the method is shown by using real examples taking into account the technical specifications.

    @inproceedings{ragia2000quality,
      author     = {Ragia, Lemonia},
      title      = {A Quality Model for Spatial Objects},
      booktitle  = {Proc. of the 19th ISPRS Congress},
      year       = {2000},
      address    = {Amsterdam},
      pages      = {855--862, 4C},
      abstract   = {This paper presents a concept for analysing the quality of 2 1/2 - D spatial objects. The developed method is based on the evaluation of specific quality parameters. These parameters are determined by a topological and geometrical analysis. The quality parameters are classified into three categories: green=accepted, yellow=uncertain, red=rejected, depending on the specifications. We give confidence regions for all quality parameters, especially for completeness, false alarm rate and detection rate. The feasibility of the method is shown by using real examples taking into account the technical specifications.},
      city       = {Bonn},
      proceeding = {Appeared at the Proc. of the 19th ISPRS Congress},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Ragia2000Quality.pdf},
    }

  • P. Schmalkoke, “Bestimmung der Eigenbewegung eines Stereo-Kamera-Systems auf der Grundlage quasi-kontinuierlicher Bilddatenauswertung,” Diplomarbeit Master Thesis, 2000.
    [BibTeX]
    [none]
    @mastersthesis{schmalkoke2000bestimmung,
      author   = {Schmalkoke, Peter},
      title    = {Bestimmung der Eigenbewegung eines Stereo-Kamera-Systems auf der Grundlage quasi-kontinuierlicher Bilddatenauswertung},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {2000},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Ansgar Brunn},
      abstract = {[none]},
      city     = {Bonn},
    }

1999

  • W. Förstner, “On Estimating Rotations,” in Festschrift für Prof. Dr.-Ing. Heinrich Ebner zum 60. Geburtstag., Lehrstuhl für Photogrammetrie und Fernerkundung, TU München, 1999.
    [BibTeX] [PDF]

    The paper collects tools for estimating rotations. Starting from the classical representations with quaternions concatenation rules for the Rodriguez parameters are given. Direct estimates for mean rotations and for rotations from homologous spatial directions are given. Two robust estimation procedures are given for estimating the rotation matrix of a single camera from observed straight line segments in a legoland scene based on a grouping procedure for line segments and a clustering procedure on the 3-sphere.

    @InProceedings{forstner1999estimating,
    title = {On Estimating Rotations},
    author = {F\"orstner, Wolfgang},
    booktitle = {Festschrift f\"ur Prof. Dr.-Ing. Heinrich Ebner zum 60. Geburtstag.},
    year = {1999},
    address = {Lehrstuhl f\"ur Photogrammetrie und Fernerkundung, TU M\"unchen},
    editor = {Heipke, C. and Mayer, H.},
    abstract = {The paper collects tools for estimating rotations. Starting from the classical representations with quaternions concatenation rules for the Rodriguez parameters are given. Direct estimates for mean rotations and for rotations from homologous spatial directions are given. Two robust estimation procedures are given for estimating the rotation matrix of a single camera from observed straight line segments in a legoland scene based on a grouping procedure for line segments and a clustering procedure on the 3-sphere.},
    city = {Bonn},
    proceeding = {Festschrift f\"ur Prof. Dr.-Ing. Heinrich Ebner zum 60. Geburtstag},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1999Estimating.pdf},
    }

  • W. Förstner, “3D-City Models: Automatic and Semiautomatic Acquisition Methods,” in Photogrammetrische Woche, Stuttgart, 1999.
    [BibTeX] [PDF]

    3D-city models are becoming an important tool for town planning. Photogrammetry appears to provide the only economic means to acquire truly 3D city data. The paper discusses the current status of automatic and semiautomatic acquisition methods. Research in automatic methods for building extraction being increasingly intensive in the last few years has led to promising results, however, still lacks the performance needed for practical applications. Semiautomatic acquisition methods rely on the ability of the operator to intelligently interpret and select the required information and appear to be ripe for practical implementation.

    @InProceedings{forstner19993d,
    title = {3D-City Models: Automatic and Semiautomatic Acquisition Methods},
    author = {F\"orstner, Wolfgang},
    booktitle = {Photogrammetrische Woche},
    year = {1999},
    address = {Stuttgart},
    abstract = {3D-city models are becoming an important tool for town planning. Photogrammetry appears to provide the only economic means to acquire truly 3D city data. The paper discusses the current status of automatic and semiautomatic acquisition methods. Research in automatic methods for building extraction being increasingly intensive in the last few years has led to promising results, however, still lacks the performance needed for practical applications. Semiautomatic acquisition methods rely on the ability of the operator to intelligently interpret and select the required information and appear to be ripe for practical implementation.},
    city = {Bonn},
    proceeding = {Photogrammetrische Woche},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner19993D.pdf},
    }

  • W. Förstner, “Uncertain Neighborhood Relations of Point Sets and Fuzzy Delaunay Triangulation,” in Proc. of DAGM Symposium Mustererkennung, Bonn, Germany, 1999. doi:10.1007/978-3-642-60243-6_25
    [BibTeX] [PDF]

    Voronoi diagrams are a classical tool for analyzing spatial neighborhood relations. For point fields the spatial proximity can be easily visualized by the dual graph, the Delaunay triangulation. In image analysis VDs and DTs are commonly used to derive neighborhoods for grouping or for relational matching. Neighborhood relations derived from the VD, however, are uncertain in case the common side of two Voronoi cells is comparably short or, equivalently, in case four points of two neighboring triangles in a DT are close to a circle. We propose a measure for characterizing the uncertainty of neighborhoods in a plane point field. As a side result we show the measure to be invariant to the numbering of the four points, though being dependent on the cross ratio of four points. Defining a fuzzy Delaunay triangulation is taken as an example.

    @inproceedings{forstner1999uncertain,
      author     = {F\"orstner, Wolfgang},
      title      = {Uncertain Neighborhood Relations of Point Sets and Fuzzy Delaunay Triangulation},
      booktitle  = {Proc. of DAGM Symposium Mustererkennung},
      year       = {1999},
      address    = {Bonn, Germany},
      doi        = {10.1007/978-3-642-60243-6_25},
      abstract   = {Voronoi diagrams are a classical tool for analyzing spatial neighborhood relations. For point fields the spatial proximity can be easily visualized by the dual graph, the Delaunay triangulation. In image analysis VDs and DTs are commonly used to derive neighborhoods for grouping or for relational matching. Neighborhood relations derived from the VD, however, are uncertain in case the common side of two Voronoi cells is comparably short or, equivalently, in case four points of two neighboring triangles in a DT are close to a circle. We propose a measure for characterizing the uncertainty of neighborhoods in a plane point field. As a side result we show the measure to be invariant to the numbering of the four points, though being dependent on the cross ratio of four points. Defining a fuzzy Delaunay triangulation is taken as an example.},
      city       = {Bonn},
      proceeding = {Proc. of DAGM Symposium Mustererkennung},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1999Uncertain.pdf},
    }

  • W. Förstner and B. Moonen, “A Metric for Covariance Matrices,” in Festschrift for Erik W. Grafarend on the occasion of his 60th birthday. Also appeared in: Geodesy – The Challenge of the 3rd Millennium (2003, with editors Professor Dr. Erik W. Grafarend, Dr. Friedrich W. Krumm,Dr. Volker S. Schwarze, ISBN: 978-3-642-07733-3 (Print) 978-3-662-05296-9 (Online)), 1999, pp. 113-128.
    [BibTeX] [PDF]

    The paper presents a metric for positive definite covariance matrices. It is a natural expression involving traces and joint eigenvalues of the matrices. It is shown to be the distance coming from a canonical invariant Riemannian metric on the space $Sym^+(n,\mR)$ of real symmetric positive definite matrices. In contrast to known measures, collected e.g. in Grafarend 1972, the metric is invariant under affine transformations and inversion. It can be used for evaluating covariance matrices or for optimization of measurement designs. Keywords: Covariance matrices, metric, Lie groups, Riemannian manifolds, exponential mapping, symmetric spaces

    @InProceedings{forstner1999metric,
    title = {A Metric for Covariance Matrices},
    author = {F\"orstner, Wolfgang and Moonen, Boudewijn},
    booktitle = {Festschrift for Erik W. Grafarend on the occasion of his 60th birthday. Also appeared in: Geodesy - The Challenge of the 3rd Millennium (2003, with editors Professor Dr. Erik W. Grafarend, Dr. Friedrich W. Krumm,Dr. Volker S. Schwarze, ISBN: 978-3-642-07733-3 (Print) 978-3-662-05296-9 (Online))},
    year = {1999},
    editor = {Krumm, F. and Schwarze, V. S.},
    pages = {113-128},
    abstract = {The paper presents a metric for positive definite covariance matrices. It is a natural expression involving traces and joint eigenvalues of the matrices. It is shown to be the distance coming from a canonical invariant Riemannian metric on the space $Sym^+(n,\mR)$ of real symmetric positive definite matrices. In contrast to known measures, collected e.~g. in Grafarend 1972, the metric is invariant under affine transformations and inversion. It can be used for evaluating covariance matrices or for optimization of measurement designs. Keywords: Covariance matrices, metric, Lie groups, Riemannian manifolds, exponential mapping, symmetric spaces},
    city = {Bonn},
    proceeding = {Quo vadis geodesia ...?, Festschrift for Erik W. Grafarend on the occasion of his 60th birthday},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1999Metric.pdf},
    }

  • A. Faber, “Scale Characteristics of Local Autocovariances for Texture Segmentation,” International Archives of Photogrammetrie and Remote Sensing, vol. 32, 1999.
    [BibTeX] [PDF]

    We want to extract urban structures from aerial images or high resolution satellite scenes like German MOMS02 sensor. We aim at a separation of neighbored textured regions importend for describing different urban structures, however, do not intend to reach a full partitioning of the image. It has been shown, that grey level segmentation alone is not sufficient to approach this problem. Because some kinds of segmentation errors, like over- or undersegmentation, are possible. If we want to use texture additionally, the development of a suitable representation of texture images is required. We use the scale characteristics of the local autocovariance function, called SCAF, of the possibly multiband image function. This characteristics is derived in two steps: * The parameters strength, direction and anisotropy of the texture are derived from the squared gradient of the image function. * The spatial frequency content of these features is then determined using a Laplacian pyramid, which separates the different spectral bands of the texture. The process goes on with analyzing eigenvalues of the negative Hessian as an approximation of autocovariance function. So we obtain the three described texture parameters strength, direction and anisotropy. For these parameters we then compute the Laplace pyramid representing the spectral decomposition of the signal. For processing textured images in the described way, we fuse all image channels by integrating the squared gradient of the texture features in all channels of the image pyramid again. The normalization uses the theoretical noise behavior of the filter kernels of the Laplace pyramid, which we determined by analyzing the impulse response. The final result of the process then are the texture edges. The paper contains a presentation of our method and results for artificial and natural scenes.

    @article{faber1999scale,
    title = {Scale Characteristics of Local Autocovariances for Texture Segmentation},
    author = {Faber, Anette},
    journal = {International Archives of Photogrammetry and Remote Sensing},
    year = {1999},
    volume = {32},
    part = {7-4-3 W6},
    abstract = {We want to extract urban structures from aerial images or high resolution satellite scenes like German MOMS02 sensor. We aim at a separation of neighbored textured regions important for describing different urban structures, however, do not intend to reach a full partitioning of the image. It has been shown, that grey level segmentation alone is not sufficient to approach this problem. Because some kinds of segmentation errors, like over- or undersegmentation, are possible. If we want to use texture additionally, the development of a suitable representation of texture images is required. We use the scale characteristics of the local autocovariance function, called SCAF, of the possibly multiband image function. This characteristics is derived in two steps: * The parameters strength, direction and anisotropy of the texture are derived from the squared gradient of the image function. * The spatial frequency content of these features is then determined using a Laplacian pyramid, which separates the different spectral bands of the texture. The process goes on with analyzing eigenvalues of the negative Hessian as an approximation of autocovariance function. So we obtain the three described texture parameters strength, direction and anisotropy. For these parameters we then compute the Laplace pyramid representing the spectral decomposition of the signal. For processing textured images in the described way, we fuse all image channels by integrating the squared gradient of the texture features in all channels of the image pyramid again. The normalization uses the theoretical noise behavior of the filter kernels of the Laplace pyramid, which we determined by analyzing the impulse response. The final result of the process then are the texture edges. The paper contains a presentation of our method and results for artificial and natural scenes.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Faber1999Scale.pdf},
    }

  • A. Faber, “Skalencharakteristik der Lokalen Autokovarianzfunktion für die Textursegmentierung,” Institut für Photogrammetrie 1999.
    [BibTeX] [PDF]

    \textbf{Summary} This report describes research on the extraction of urban structures from aerial images or high resolution satellite scenes. We aim at a separation of neighboring textured regions important for different urban structures. It has been shown, that grey level segmentation alone is not sufficient to solve this problem. If we want to use texture additionally, the development of a suitable representation of texture images is required. We use the scale characteristics of the local autocovariance function, called SCAF, of the possibly multiband image function. The final result of the process are texture edges. \textbf{Zusammenfassung} Dieser Bericht beschreibt den aktuellen Stand der Forschung auf dem Gebiet der Extraktion urbaner Strukturen aus Luft- bzw. hochauflösenden Satellitendaten am Institut für Photogrammetrie. Die Trennung (Segmentierung) benachbarter texturierter, für die Beschreibung solcher urbaner Strukturen bedeutender, Regionen ist das Ziel der Arbeit. Es hat sich gezeigt, dass Segmentierungsverfahren auf Intensitätsbasis allein oft unzureichende Ergebnisse liefern. Deshalb wurde eine Repräsentationsform für Bilddaten entwickelt, die es erlaubt, zusätzlich Texturinformationen für die Problemlösung zu nutzen. Dies erfolgt unter Verwendung der Skalencharakteristik der Lokalen Autokovarianzfunktion, kurz SCAF, der Bildfunktion. Das Ergebnis des vorgestellten Prozesses sind Texturkanten.

    @techreport{faber1999skalencharakteristik,
    title = {Skalencharakteristik der Lokalen Autokovarianzfunktion f\"ur die Textursegmentierung},
    author = {Faber, Anette},
    institution = {Institut f\"ur Photogrammetrie},
    address = {Bonn},
    year = {1999},
    abstract = {\textbf{Summary} This report describes research on the extraction of urban structures from aerial images or high resolution satellite scenes. We aim at a separation of neighboring textured regions important for different urban structures. It has been shown, that grey level segmentation alone is not sufficient to solve this problem. If we want to use texture additionally, the development of a suitable representation of texture images is required. We use the scale characteristics of the local autocovariance function, called SCAF, of the possibly multiband image function. The final result of the process are texture edges. \textbf{Zusammenfassung} Dieser Bericht beschreibt den aktuellen Stand der Forschung auf dem Gebiet der Extraktion urbaner Strukturen aus Luft- bzw. hochaufl\"osenden Satellitendaten am Institut f\"ur Photogrammetrie. Die Trennung (Segmentierung) benachbarter texturierter, f\"ur die Beschreibung solcher urbaner Strukturen bedeutender, Regionen ist das Ziel der Arbeit. Es hat sich gezeigt, dass Segmentierungsverfahren auf Intensit\"atsbasis allein oft unzureichende Ergebnisse liefern. Deshalb wurde eine Repr\"asentationsform f\"ur Bilddaten entwickelt, die es erlaubt, zus\"atzlich Texturinformationen f\"ur die Probleml\"osung zu nutzen. Dies erfolgt unter Verwendung der Skalencharakteristik der Lokalen Autokovarianzfunktion, kurz SCAF, der Bildfunktion. Das Ergebnis des vorgestellten Prozesses sind Texturkanten.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Faber1999Technischer.pdf},
    }

  • P. Faber, “Nonparametric segmentation of 2D curves,” in Institut für Photogrammetrie, 1999.
    [BibTeX] [PDF]

    There is a considerable interest in the computer vision and computer graphics community to generate a suitable symbolic description of 2D curves. This paper presents a new approach to generate a compact description by a nonparametric segmentation algorithm. Important is that no thresholds are required to determine the segmentation, which best describes a 2D curve. The result is a symbolic description by a set of features of different order. The emphasis is on the used significance measure referring to the limits of an acceptable interpretableness of the reachable segmentation results. The proposed algorithm has a number of interesting properties: (1) independence of the segmentation from any parameters, (2) invariance to geometric transformations, (3) simplicity, and (4) efficiency of the segmentation algorithm.

    @inproceedings{faber1999nonparametric,
    title = {Nonparametric segmentation of {2D} curves},
    author = {Faber, Petko},
    booktitle = {Institut f\"ur Photogrammetrie},
    year = {1999},
    address = {Bonn},
    abstract = {There is a considerable interest in the computer vision and computer graphics community to generate a suitable symbolic description of 2D curves. This paper presents a new approach to generate a compact description by a nonparametric segmentation algorithm. Important is that no thresholds are required to determine the segmentation, which best describes a 2D curve. The result is a symbolic description by a set of features of different order. The emphasis is on the used significance measure referring to the limits of an acceptable interpretableness of the reachable segmentation results. The proposed algorithm has a number of interesting properties: (1) independence of the segmentation from any parameters, (2) invariance to geometric transformations, (3) simplicity, and (4) efficiency of the segmentation algorithm.},
    internal-note = {booktitle appears to hold the institution, not a proceedings title -- verify the actual venue},
    url = {https://www.ipb.uni-bonn.de/pdfs/Faber1999Nonparametric.pdf},
    }

  • P. Faber, “Parameterlose Kontursegmentierung,” in Proc. of DAGM Symposium Mustererkennung, Bonn, Germany, 1999, p. 172–180. doi:10.1007/978-3-642-60243-6_20
    [BibTeX] [PDF]

    In diesem Beitrag wird ein Ansatz zur Segmentierung einer aus einer ikonischen Beschreibung eines Bildes extrahierten Kontur in eine Menge von Kurven verschiedener Ordnungen vorgestellt. Die Funktion des allgemeinen Ansatzes wird anhand der Zerlegung einer Kontur in eine Menge von Geraden- und Ellipsensegmente gezeigt. Charakteristische Eigenschaften dieses Ansatzes sind: (1) die parameterlose Segmentierung, (2) die Invarianz gegenüber geometrischen Transformationen, (3) die Simplizität des gewählten Ansatzes und (4) die verhältnismäßig effiziente Segmentierung.

    @inproceedings{faber1999parameterlose,
    title = {Parameterlose Kontursegmentierung},
    author = {Faber, Petko},
    booktitle = {Proc. of DAGM Symposium Mustererkennung},
    year = {1999},
    address = {Bonn, Germany},
    pages = {172--180},
    doi = {10.1007/978-3-642-60243-6_20},
    abstract = {In diesem Beitrag wird ein Ansatz zur Segmentierung einer aus einer ikonischen Beschreibung eines Bildes extrahierten Kontur in eine Menge von Kurven verschiedener Ordnungen vorgestellt. Die Funktion des allgemeinen Ansatzes wird anhand der Zerlegung einer Kontur in eine Menge von Geraden- und Ellipsensegmente gezeigt. Charakteristische Eigenschaften dieses Ansatzes sind: (1) die parameterlose Segmentierung, (2) die Invarianz gegen\"uber geometrischen Transformationen, (3) die Simplizit\"at des gew\"ahlten Ansatzes und (4) die verh\"altnism\"a{\ss}ig effiziente Segmentierung.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Faber1999Parameterlose.pdf},
    }

  • P. Faber, “Relaxationsverfahren in der Mustererkennung: Ein allgemeiner Ansatz,” PhD Thesis, 1999.
    [BibTeX]

    \textbf{Summary} The purpose of this thesis is to make a contribution to the theoretical bases of relaxation processes in pattern recognition with particular emphasis on the development of a general approach to describe and design relaxation processes. Many approaches to analyze and describe relaxation processes are suggested in the literature, but the possibilities to construct application-dependent relaxation processes are frequently pointed out only insufficiently. And here the thesis starts. The difference, in contrast to other approaches, is in the general validity of the chosen approach to describe the set of all relaxation processes. This means nothing else, except that the formulation of the approach is on the one hand independent from a concrete calculation and thus on the other hand it can be transfered relatively easily to each iterative formulatable application. The advantage of such a generally valid representation is therefore a) in the possibility to describe the set of all relaxation processes within a closed theory and b) in the reduction of the up to now necessary effort to construct a suitable relaxation process for a concrete task with a better motivation of the chosen approach. But with an increasing generalization also the possibility of the view on individual details will be lost. This will be reflected within the thesis in the reference to a special calculus or the statements on the convergence behavior. The emphasis of the predominant theoretical considerations in the first part of this thesis is the structual subdivision of relaxation processes into three phases: initialization, updating and evaluation, whereby investigations of the updating phase are in the foreground. Regarding the different components of the updating phase, general regularities will be worked out which will permit to distinguish between all relaxation processes and all common iterative processes. 
Apart from the theoretical results the developed approach also has direct practical significance. In the second part of this thesis the validity of the developed structure will be illustrated by a complex example, the reconstruction of polymorphic 3D objects. The validity is demonstrated through continuous inclusion of relaxation into the process of generating a solution. The development of a structure for the description and construction of relaxation processes makes other investigations possible. They may either require a detailed analysis of individual aspects of the structure or be of a fundamental nature. Here the investigation of the convergence behavior and of the significance of different calculations in context of the updating step will be interesting and also necessary. \textbf{Zusammenfassung} Die Arbeit will einen Beitrag zu den theoretischen Grundlagen der Relaxationsverfahren in der Mustererkennung leisten. Dabei steht die Entwicklung eines allgemeinen Ansatzes zur Beschreibung und Konstruktion von Relaxationsverfahren in der Mustererkennung im Vordergrund. Ansätze zur Analyse und Beschreibung von Relaxationsverfahren sind aus der Literatur hinreichend bekannt; die Möglichkeiten einer Konstruktion applikationsabhängiger Relaxationsverfahren werden jedoch häufig nur unzureichend aufgezeigt. An dieser Stelle setzt die Arbeit an. Der Unterschied gegenüber anderen Ansätzen liegt dabei in der Allgemeingültigkeit des gewählten Ansatzes zur Beschreibung der Menge aller Relaxationsverfahren, was letztlich nichts anderes bedeutet, als dass die Formulierung des Ansatzes einerseits unabhängig von einem konkreten Kalkül erfolgt und somit andererseits relativ einfach auf jede iterativ formulierbare Applikation der Mustererkennung übertragen werden kann. 
Der Vorteil einer solchen allgemeingültigen Darstellung liegt dabei a) in der Möglichkeit der Beschreibung der Menge aller Relaxationsverfahren innerhalb einer geschlossenen Theorie und b) in einer Verringerung des bisher notwendigen Aufwandes bei der Konstruktion eines geeigneten Relaxationsverfahrens für eine konkrete Aufgabenstellung bei einer gleichzeitig verbesserten Möglichkeit der Motivation des jeweils konkret gewählten Ansatzes. Mit zunehmender Verallgemeinerung geht allerdings auch die Möglichkeit der Sicht auf einzelne Details verloren, was sich innerhalb der Arbeit konkret im Bezug zu einem speziellen Kalkül oder den Aussagen zum Konvergenzverhalten widerspiegelt. Schwerpunkt der überwiegend theoretischen Überlegungen im ersten Teil der Arbeit bildet die strukturelle Untergliederung der Relaxationsverfahren in die drei Phasen Initialisierung, Aktualisierung und Evaluierung, wobei die Aktualisierung als wesentliches Differenzierungsmerkmal gegenüber “gewöhnlichen” Iterationsverfahren im Mittelpunkt steht. In Bezug auf die verschiedenen Bestandteile der Aktualisierungsphase werden allgemeingültige Gesetzmässigkeiten herausgearbeitet, die letztlich eine genaue Abgrenzung der Menge aller Relaxationsverfahren von der Menge aller iterativen Verfahren gestatten. Neben den theoretischen Ergebnissen besitzt der in der Arbeit entwickelte Ansatz unmittelbare praktische Bedeutung. Anhand eines komplexen Beispiels, der Rekonstruktion polymorpher 3D-Objekte, wird im zweiten Teil der Arbeit die Gültigkeit der entwickelten Struktur durch eine durchgängige Einbeziehung von Relaxationsverfahren in den Prozess der Generierung einer Lösung demonstriert. Aufbauend auf der in der Arbeit entwickelten Struktur zur Beschreibung und Konstruktion von Relaxationsverfahren sind weitere Untersuchungen denkbar, die einerseits eine detailliertere Analyse einzelner Aspekte der Struktur verlangen, andererseits grundsätzlicher Natur sind. 
Dabei erscheint die Untersuchung sowohl des Konvergenzverhaltens als auch der Bedeutung verschiedener Kalküle im Kontext der Aktualisierung ebenso interessant wie notwendig.

    @phdthesis{faber1999relaxationsverfahren,
    title = {Relaxationsverfahren in der Mustererkennung: Ein allgemeiner Ansatz},
    author = {Faber, Petko},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1999},
    note = {Shaker Verlag, Aachen 1999},
    abstract = {\textbf{Summary} The purpose of this thesis is to make a contribution to the theoretical bases of relaxation processes in pattern recognition with particular emphasis on the development of a general approach to describe and design relaxation processes. Many approaches to analyze and describe relaxation processes are suggested in the literature, but the possibilities to construct application-dependent relaxation processes are frequently pointed out only insufficiently. And here the thesis starts. The difference, in contrast to other approaches, is in the general validity of the chosen approach to describe the set of all relaxation processes. This means nothing else, except that the formulation of the approach is on the one hand independent from a concrete calculation and thus on the other hand it can be transferred relatively easily to each iterative formulatable application. The advantage of such a generally valid representation is therefore a) in the possibility to describe the set of all relaxation processes within a closed theory and b) in the reduction of the up to now necessary effort to construct a suitable relaxation process for a concrete task with a better motivation of the chosen approach. But with an increasing generalization also the possibility of the view on individual details will be lost. This will be reflected within the thesis in the reference to a special calculus or the statements on the convergence behavior. The emphasis of the predominant theoretical considerations in the first part of this thesis is the structural subdivision of relaxation processes into three phases: initialization, updating and evaluation, whereby investigations of the updating phase are in the foreground. Regarding the different components of the updating phase, general regularities will be worked out which will permit to distinguish between all relaxation processes and all common iterative processes.
    Apart from the theoretical results the developed approach also has direct practical significance. In the second part of this thesis the validity of the developed structure will be illustrated by a complex example, the reconstruction of polymorphic 3D objects. The validity is demonstrated through continuous inclusion of relaxation into the process of generating a solution. The development of a structure for the description and construction of relaxation processes makes other investigations possible. They may either require a detailed analysis of individual aspects of the structure or be of a fundamental nature. Here the investigation of the convergence behavior and of the significance of different calculations in context of the updating step will be interesting and also necessary. \textbf{Zusammenfassung} Die Arbeit will einen Beitrag zu den theoretischen Grundlagen der Relaxationsverfahren in der Mustererkennung leisten. Dabei steht die Entwicklung eines allgemeinen Ansatzes zur Beschreibung und Konstruktion von Relaxationsverfahren in der
    Mustererkennung im Vordergrund. Ans\"atze zur Analyse und Beschreibung von Relaxationsverfahren sind aus der Literatur hinreichend bekannt; die M\"oglichkeiten einer Konstruktion applikationsabh\"angiger Relaxationsverfahren werden jedoch h\"aufig nur unzureichend aufgezeigt. An dieser Stelle setzt die Arbeit an. Der Unterschied gegen\"uber anderen Ans\"atzen liegt dabei in der Allgemeing\"ultigkeit des gew\"ahlten Ansatzes zur Beschreibung der Menge aller Relaxationsverfahren, was letztlich nichts anderes bedeutet, als dass die Formulierung des Ansatzes einerseits unabh\"angig von einem konkreten Kalk\"ul erfolgt und somit andererseits relativ einfach auf jede iterativ formulierbare Applikation der Mustererkennung \"ubertragen werden kann. Der Vorteil einer solchen allgemeing\"ultigen Darstellung liegt dabei a) in der M\"oglichkeit der Beschreibung der Menge aller Relaxationsverfahren innerhalb einer geschlossenen Theorie und b) in einer Verringerung des bisher notwendigen Aufwandes bei der Konstruktion eines geeigneten Relaxationsverfahrens f\"ur eine konkrete Aufgabenstellung bei einer gleichzeitig verbesserten M\"oglichkeit der Motivation des jeweils konkret gew\"ahlten Ansatzes. Mit zunehmender Verallgemeinerung geht allerdings auch die M\"oglichkeit der Sicht auf einzelne Details verloren, was sich innerhalb der Arbeit konkret im Bezug zu einem speziellen Kalk\"ul oder den Aussagen zum Konvergenzverhalten widerspiegelt. Schwerpunkt der \"uberwiegend theoretischen \"Uberlegungen im ersten Teil der Arbeit bildet die strukturelle Untergliederung der Relaxationsverfahren in die drei Phasen Initialisierung, Aktualisierung und Evaluierung, wobei die Aktualisierung als wesentliches Differenzierungsmerkmal gegen\"uber ``gew\"ohnlichen'' Iterationsverfahren im Mittelpunkt steht.
    In Bezug auf die verschiedenen Bestandteile der Aktualisierungsphase werden allgemeing\"ultige Gesetzm\"assigkeiten herausgearbeitet, die letztlich eine genaue Abgrenzung der Menge aller Relaxationsverfahren von der Menge aller iterativen Verfahren gestatten. Neben den theoretischen Ergebnissen besitzt der in der Arbeit entwickelte Ansatz unmittelbare praktische Bedeutung. Anhand eines komplexen Beispiels, der Rekonstruktion polymorpher 3D-Objekte, wird im zweiten Teil der Arbeit die G\"ultigkeit der entwickelten Struktur durch eine durchg\"angige Einbeziehung von Relaxationsverfahren in den Prozess der Generierung einer L\"osung demonstriert. Aufbauend auf der in der Arbeit entwickelten Struktur zur Beschreibung und Konstruktion von Relaxationsverfahren sind weitere Untersuchungen denkbar, die einerseits eine detailliertere Analyse einzelner Aspekte der Struktur verlangen, andererseits grunds\"atzlicher Natur sind. Dabei erscheint die Untersuchung sowohl des Konvergenzverhaltens als auch der Bedeutung verschiedener Kalk\"ule im Kontext der Aktualisierung ebenso interessant wie notwendig.},
    }

  • A. Fischer, T. H. Kolbe, and F. Lang, “On the Use of Geometric and Semantic Models for Component-Based Building Reconstruction,” in Semantic Modeling for the Acquisition of Topographic Information from Images and Maps, Smati ’99 Workshop, Institut für Photogrammetrie, Universität Bonn, 1999, p. 101–119.
    [BibTeX] [PDF]

    3D building data is needed in many application areas. Besides the geometric description an increasing number of applications also demand thematic information about acquired buildings. We present a concept for the automatic extraction of buildings from aerial images. In contrast to other approaches generic building structures both are geometrically reconstructed and semantically classified. A component-based, parameterized building model is employed to control the reconstruction of buildings. This paper describes how geometric and semantic knowledge of buildings is propagated through the different aggregation levels of the building model. Furthermore, it is shown how rules and constraints are derived from the model and exploited at each stage of the reconstruction process.

    @inproceedings{fischer1999use,
    title = {On the Use of Geometric and Semantic Models for Component-Based Building Reconstruction},
    author = {Fischer, Andr\'e and Kolbe, Thomas H. and Lang, Felicitas},
    booktitle = {Semantic Modeling for the Acquisition of Topographic Information from Images and Maps, Smati '99 Workshop},
    year = {1999},
    address = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
    editor = {F\"orstner, Wolfgang and Lidtke, C.-E. and B\"uckner, J.},
    pages = {101--119},
    abstract = {3D building data is needed in many application areas. Besides the geometric description an increasing number of applications also demand thematic information about acquired buildings. We present a concept for the automatic extraction of buildings from aerial images. In contrast to other approaches generic building structures both are geometrically reconstructed and semantically classified. A component-based, parameterized building model is employed to control the reconstruction of buildings. This paper describes how geometric and semantic knowledge of buildings is propagated through the different aggregation levels of the building model. Furthermore, it is shown how rules and constraints are derived from the model and exploited at each stage of the reconstruction process.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Fischer1999Use.pdf},
    }

  • E. Gülch, H. Müller, and T. Läbe, “Integration of Automatic Processes Into Semi-Automatic Building Extraction,” in Proc. of ISPRS Conf. “Automatic Extraction Of GIS Objects From Digital Imagery”, 1999.
    [BibTeX] [PDF]

    The modeling of three-dimensional objects is a current topic in digital photogrammetric research. The modeling of buildings in digital imagery or digital surface models involving automation processes has reached a level where it can compete with classical photogrammetric stereo measurements. There are many different ways on how to integrate automation. We describe our system and its automated features that support the operator in the adaption of parametric models to multiple overlapping images. There do exist tools to automate the measurement of heights, to automate the estimation of the form parameters or for the handling of building aggregates. With such tools we can reach about 20 seconds for the modeling of a volumetric primitive which is fully comparable to the currently used photogrammetric methods.

    @inproceedings{gulch1999integration,
    title = {Integration of Automatic Processes Into Semi-Automatic Building Extraction},
    author = {G\"ulch, Eberhard and M\"uller, Hardo and L\"abe, Thomas},
    booktitle = {Proc. of ISPRS Conf. ``Automatic Extraction Of {GIS} Objects From Digital Imagery''},
    year = {1999},
    abstract = {The modeling of three-dimensional objects is a current topic in digital photogrammetric research. The modeling of buildings in digital imagery or digital surface models involving automation processes has reached a level where it can compete with classical photogrammetric stereo measurements. There are many different ways on how to integrate automation. We describe our system and its automated features that support the operator in the adaption of parametric models to multiple overlapping images. There do exist tools to automate the measurement of heights, to automate the estimation of the form parameters or for the handling of building aggregates. With such tools we can reach about 20 seconds for the modeling of a volumetric primitive which is fully comparable to the currently used photogrammetric methods.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Gulch1999Integration.pdf},
    }

  • S. Heuel, “Topological and Geometrical Reasoning in 3D Grouping for Reconstructing Polyhedral Surfaces,” in Institut für Photogrammetrie, 1999.
    [BibTeX]

    We are developing a system for reconstructing polyhedral surfaces from multiple images. This process can take advantage of the topological relations of the observed image features triggering and therefore speeding up the grouping processes. Exploiting the statistical properties of features when grouping them leads to consistent decisions being invariant to numbering and choice of coordinate system and allows statistical testing. This simplifies the choice of thresholds to the definition of a scene independent significance level. We decribe the topological and statistical models used within our system. Experiments with synthetic and real data prove the feasibility of the approach.

    @inproceedings{heuel1999topological,
    title = {Topological and Geometrical Reasoning in {3D} Grouping for Reconstructing Polyhedral Surfaces},
    author = {Heuel, Stephan},
    booktitle = {Institut f\"ur Photogrammetrie},
    year = {1999},
    abstract = {We are developing a system for reconstructing polyhedral surfaces from multiple images. This process can take advantage of the topological relations of the observed image features triggering and therefore speeding up the grouping processes. Exploiting the statistical properties of features when grouping them leads to consistent decisions being invariant to numbering and choice of coordinate system and allows statistical testing. This simplifies the choice of thresholds to the definition of a scene independent significance level. We describe the topological and statistical models used within our system. Experiments with synthetic and real data prove the feasibility of the approach.},
    internal-note = {booktitle appears to hold the institution, not a proceedings title -- verify the actual venue},
    }

  • S. Heuel and A. Faber, “Technical Report – Segmentation and Classification of Landcover Areas using a Polymorphic Feature Extraction,” Institut für Photogrammetrie 1999.
    [BibTeX] [PDF]

    This report demonstrates the use of FEX, cf ? for segmenting land-use units from remote-sensed images and their classification to meaningful clusters(?!). Two approaches for segmenting land-use units are proposed, one is based on symbolic data and one is based on iconic data. Advantages and disadvantages of both methods are discussed. Problems of the method and the output of FEX, which appeared during this work are discussed. The classification is based on a linear classifier, which supplies classified areas according to their agricultural use. Results, demonstrating the feasibility of the process, are shown and discussed.

    @techreport{heuel1999technical,
    title = {Technical Report -- Segmentation and Classification of Landcover Areas using a Polymorphic Feature Extraction},
    author = {Heuel, Stephan and Faber, Anette},
    institution = {Institut f\"ur Photogrammetrie},
    address = {Bonn},
    year = {1999},
    abstract = {This report demonstrates the use of FEX, cf ? for segmenting land-use units from remote-sensed images and their classification to meaningful clusters(?!). Two approaches for segmenting land-use units are proposed, one is based on symbolic data and one is based on iconic data. Advantages and disadvantages of both methods are discussed. Problems of the method and the output of FEX, which appeared during this work are discussed. The classification is based on a linear classifier, which supplies classified areas according to their agricultural use. Results, demonstrating the feasibility of the process, are shown and discussed.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Heuel1999Technical.pdf},
    }

  • A. Kort, “Modellbasierte Gebäudeerkennung in Luftbildern,” Diplomarbeit Master Thesis, 1999.
    [BibTeX]
    [none]
    @mastersthesis{kort1999modellbasierte,
    title = {Modellbasierte Geb\"audeerkennung in Luftbildern},
    author = {Kort, Alexander},
    school = {Institute of Photogrammetry, University of Bonn, in Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {1999},
    note = {Betreuung: Prof. Dr. Armin B. Cremers, Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Diplomarbeit},
    address = {Bonn},
    }

  • T. Läbe, “Contribution to the OEEPE-Test on Automatic Orientation of Aerial Images, Task A – Experiences with AMOR,” in OEEPE Seminar on Automatic Orientation of Aerial Images on Database Information, Aalborg, Denmark, 1999.
    [BibTeX] [PDF]

    This paper describes the contribution of the University of Bonn to the OEEPE test on Automatic Orientation of Aerial Images (Task A). A program for the automatic exterior orientation called AMOR was developed by Wolfgang Schickler at the Institute of Photogrammetry, Bonn. The methods and ideas of this approach are summarized. This program was used to compute the exterior orientation parameters of the given two test images successfully. Results and new solved problems are reported.

    @inproceedings{labe1999contribution,
    title = {Contribution to the {OEEPE}-Test on Automatic Orientation of Aerial Images, Task {A} -- Experiences with {AMOR}},
    author = {L\"abe, Thomas},
    booktitle = {OEEPE Seminar on Automatic Orientation of Aerial Images on Database Information},
    year = {1999},
    address = {Aalborg, Denmark},
    abstract = {This paper describes the contribution of the University of Bonn to the OEEPE test on Automatic Orientation of Aerial Images (Task A). A program for the automatic exterior orientation called AMOR was developed by Wolfgang Schickler at the Institute of Photogrammetry, Bonn. The methods and ideas of this approach are summarized. This program was used to compute the exterior orientation parameters of the given two test images successfully. Results and new solved problems are reported.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Labe1999Contribution.pdf},
    }

  • L. Ragia and W. Förstner, “Automatically Assessing the Geometric and Structural Quality of Building Ground Plans,” in Proc. of ISPRS Working Group II/6, International Workshop on 3D Geospatial Data, Paris, France, 1999.
    [BibTeX] [PDF]

    The paper develops an approach for assessing the quality of ground plans of buildings. Quality is measured not only by geometrical but also by structural differences between an acquired data set and a reference data set. New hybrid techniques for automatically determining quality measures are developed, and shown to be applicable to real data. The uncertainty of the given data is taken into account. Automating quality assessment increases efficiency in checking data, allowing complete checks instead of sampling, moreover it makes quality checks objective. The developed techniques are applicable to sets of 2D regions of any type and internal structure. We also demonstrate the necessity to use the quality of the quality parameters when checking the fulfillment of quality specifications.

    @InProceedings{ragia1999automatically,
    title = {Automatically Assessing the Geometric and Structural Quality of Building Ground Plans},
    author = {Ragia, Lemonia and F\"orstner, Wolfgang},
    booktitle = {Proc. of ISPRS Working Group II/6, International Workshop on 3D Geospatial Data},
    year = {1999},
    address = {Paris, France},
    organization = {Meeting Application requirements},
    abstract = {The paper develops an approach for assessing the quality of ground plans of buildings. Quality is measured not only by geometrical but also by structural differences between an acquired data set and a reference data set. New hybrid techniques for automatically determining quality measures are developed, and shown to be applicable to real data. The uncertainty of the given data is taken into account. Automating quality assessment increases efficiency in checking data, allowing complete checks instead of sampling, moreover it makes quality checks objective. The developed techniques are applicable to sets of 2D regions of any type and internal structure. We also demonstrate the necessity to use the quality of the quality parameters when checking the fulfillment of quality specifications.},
    city = {Bonn},
    proceeding = {Proc. of ISPRS Working Group II/6, International Workshop on 3D Geospatial Data Production: Meeting Application requirements},
    url = {https://www.ipb.uni-bonn.de/pdfs/Ragia1999Automatically.pdf},
    }

1998

  • S. Abraham and W. Förstner, “Calibration Errors in Structure from Motion,” in Mustererkennung 1998, 20. DAGM-Symposium, Stuttgart, 1998, p. 117–124. doi:10.1007/978-3-642-72282-0_11
    [BibTeX] [PDF]

    In this paper we investigate the relation between camera calibration and structure from motion. A method is presented to analyze the effect of systematic errors and uncertainty in camera calibration on 3D-reconstruction and motion parameters. In two simple examples from stereo with lateral and forward motion the approach is demonstrated. The approach can easily be extended to more complex situations and used for planning and online diagnostics in calibration and structure from motion.

    @inproceedings{abraham1998calibrationa,
      author     = {Abraham, Steffen and F\"orstner, Wolfgang},
      title      = {Calibration Errors in Structure from Motion},
      booktitle  = {Mustererkennung 1998, 20. DAGM-Symposium},
      editor     = {Levi, P. and May, F. and Ahlers, R.-J. and Schanz, M.},
      address    = {Stuttgart},
      pages      = {117--124},
      year       = {1998},
      doi        = {10.1007/978-3-642-72282-0_11},
      abstract   = {In this paper we investigate the relation between camera calibration and structure from motion. A method is presented to analyze the effect of systematic errors and uncertainty in camera calibration on 3D-reconstruction and motion parameters. In two simple examples from stereo with lateral and forward motion the approach is demonstrated. The approach can easily be extended to more complex situations and used for planning and online diagnostics in calibration and structure from motion.},
      city       = {Bonn},
      proceeding = {Mustererkennung 1998, 20. DAGM-Symposium},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Abraham1998Calibration.pdf},
    }

  • S. Abraham and W. Förstner, “Calibration Errors in Structure from Motion Estimation.” 1998.
    [BibTeX]

    The paper presents methods for sensitivity analysis applied to the relation between camera calibration and structure from motion. The uncertainty of the calibration is represented by the bias and the covariance matrix of the calibration parameters, describing the effect of incomplete modeling and random errors during calibration. The effect of calibration errors onto the 3D structure is analyzed for stereo in lateral and forward motion. The results reveal interesting relations between stability and sensitivity and demonstrate the need for a rigorous statistical analysis which takes into account all mutual stochastical dependencies. As a side result, the comparison of two different calibration models, namely Tsai’s model and a new polynomial model, demonstrates the limitations of Tsai’s model.

    @inproceedings{abraham1998calibration,
      author    = {Abraham, Steffen and F\"orstner, Wolfgang},
      title     = {Calibration Errors in Structure from Motion Estimation},
      booktitle = eccv,
      year      = {1998},
      abstract  = {The paper presents methods for sensitivity analysis applied to the relation between camera calibration and structure from motion. The uncertainty of the calibration is represented by the bias and the covariance matrix of the calibration parameters, describing the effect of incomplete modeling and random errors during calibration. The effect of calibration errors onto the 3D structure is analyzed for stereo in lateral and forward motion. The results reveal interesting relations between stability and sensitivity and demonstrate the need for a rigorous statistical analysis which takes into account all mutual stochastical dependencies. As a side result, the comparison of two different calibration models, namely Tsai's model and a new polynomial model, demonstrates the limitations of Tsai's model.},
    }

  • A. Brunn, F. Lang, E. Gülch, and W. Förstner, “A Hybrid concept for 3D Building Acquisition,” in Journal for Photogrammetry & Remote Sensing, 1998, p. 119–129. doi:10.1016/S0924-2716(97)00039-7
    [BibTeX] [PDF]

    This paper presents a hybrid concept of interaction between scene and sensors for image interpretation. We present a strategy for 3D building acquisition which combines different approaches based on different levels of description and different sensors: the detection of regions of interest, and the automatic and semiautomatic reconstruction of object parts and complete buildings.

    @Article{brunn1998hybrid,
    title = {A Hybrid concept for 3D Building Acquisition},
    author = {Brunn, Ansgar and Lang, Felicitas and G\"ulch, Eberhard and F\"orstner, Wolfgang},
    journal = {Journal for Photogrammetry \& Remote Sensing},
    year = {1998},
    pages = {119--129},
    volume = {53},
    abstract = {This paper presents a hybrid concept of interaction between scene and sensors for image interpretation. We present a strategy for 3D building acquisition which combines different approaches based on different levels of description and different sensors: the detection of regions of interest, and the automatic and semiautomatic reconstruction of object parts and complete buildings.},
    city = {Bonn},
    doi = {10.1016/S0924-2716(97)00039-7},
    proceeding = {Journal for Photogrammetry \& Remote Sensing Vol. 53},
    url = {https://www.ipb.uni-bonn.de/pdfs/Brunn1998Hybrid.pdf},
    }

  • A. Brunn and U. Weidner, “Hierarchical Bayesian Nets for Building Extraction Using Dense Digital Surface Models,” Journal for Photogrammetry & Remote Sensing, vol. 53, p. 296–307, 1998. doi:10.1016/S0924-2716(98)00012-4
    [BibTeX]

    During the last years an increasing demand for 3D data of urban scenes can be recognized. Techniques for automatic acquisition of buildings are needed to satisfy this demand in an economic way. This paper describes an approach for building extraction using digital surface models (DSM) as input data. The first task is the detection of areas within the DSM which describe buildings. The second task is the reconstruction of geometric building descriptions. In this paper we focus on new extensions of our approach. The first extension is the detection of buildings using two alternative classification schemes: a binary or a statistical classification based on Bayesian nets, both using local geometric properties. The second extension is the extraction of roof structures as a first step towards the reconstruction of polyhedral building descriptions.

    @article{brunn1998hierarchical,
      author   = {Brunn, Ansgar and Weidner, Uwe},
      title    = {Hierarchical Bayesian Nets for Building Extraction Using Dense Digital Surface Models},
      journal  = {Journal for Photogrammetry \& Remote Sensing},
      volume   = {53},
      pages    = {296--307},
      year     = {1998},
      doi      = {10.1016/S0924-2716(98)00012-4},
      abstract = {During the last years an increasing demand for 3D data of urban scenes can be recognized. Techniques for automatic acquisition of buildings are needed to satisfy this demand in an economic way. This paper describes an approach for building extraction using digital surface models (DSM) as input data. The first task is the detection of areas within the DSM which describe buildings. The second task is the reconstruction of geometric building descriptions. In this paper we focus on new extensions of our approach. The first extension is the detection of buildings using two alternative classification schemes: a binary or a statistical classification based on Bayesian nets, both using local geometric properties. The second extension is the extraction of roof structures as a first step towards the reconstruction of polyhedral building descriptions.},
    }

  • W. Förstner, “On the Theoretical Accuracy of Multi Image Matching, Restoration and Triangulation,” in Festschrift zum 65. Geburtstag von Prof. Dr.-Ing. mult. G. Konecny, Institut für Photogrammetrie, Universität Hannover, 1998.
    [BibTeX] [PDF]

    The paper analyses the theoretical precision of integrated multiple image matching and image reconstruction, and the theoretical accuracy of the triangulation from a sequence of images specializing to tri- and binocular stereo. The estimated geometric parameters from multi image matching, used in aerial triangulation for point transfer, turns out to be statistically uncorrelated from the restored image, and the precision of the shift between two images does not depend on the number of images taking part in the multi image matching. Triangulating from an image sequence reveals the variance of the position of points perpendicular to the trajectory to decrease with the number of images whereas the variance of the distance of the 3D-point to the trajectory decreases with the cube of the number of images, taking the distance between the images as given. The case of three images, representative for three line cameras shows the distance to be independent of the central ray.

    @inproceedings{forstner1998theoretical,
      author     = {F\"orstner, Wolfgang},
      title      = {On the Theoretical Accuracy of Multi Image Matching, Restoration and Triangulation},
      booktitle  = {Festschrift zum 65. Geburtstag von Prof. Dr.-Ing. mult. G. Konecny},
      address    = {Institut f\"ur Photogrammetrie, Universit\"at Hannover},
      year       = {1998},
      abstract   = {The paper analyses the theoretical precision of integrated multiple image matching and image reconstruction, and the theoretical accuracy of the triangulation from a sequence of images specializing to tri- and binocular stereo. The estimated geometric parameters from multi image matching, used in aerial triangulation for point transfer, turns out to be statistically uncorrelated from the restored image, and the precision of the shift between two images does not depend on the number of images taking part in the multi image matching. Triangulating from an image sequence reveals the variance of the position of points perpendicular to the trajectory to decrease with the number of images whereas the variance of the distance of the 3D-point to the trajectory decreases with the cube of the number of images, taking the distance between the images as given. The case of three images, representative for three line cameras shows the distance to be independent of the central ray.},
      city       = {Bonn},
      proceeding = {Festschrift zum 65. Geburtstag von Prof. Dr.-Ing. mult. G. Konecny},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1998Theoreticala.pdf},
    }

  • W. Förstner, “Working Group Report “Specification and propagation of uncertainty”,” in interne Veröffentlichung 1998, Bonn, Germany, 1998.
    [BibTeX] [PDF]
    [none]
    @InProceedings{forstner1998working,
    title = {Working Group Report ``Specification and propagation of uncertainty''},
    author = {F\"orstner, Wolfgang},
    booktitle = {interne Ver\"offentlichung 1998},
    year = {1998},
    address = {Bonn, Germany},
    abstract = {[none]},
    city = {Bonn},
    proceeding = {(interne Ver\"offentlichung 1998)},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1998Working.pdf},
    }

  • H. Fehlis, “Ein videometrisches 6D-Trackingverfahren für den Einsatz in Virtuellen Studios,” Diplomarbeit Master Thesis, 1998.
    [BibTeX]
    [none]
    @mastersthesis{fehlis1998ein,
      author   = {Fehlis, Hendrik},
      title    = {Ein videometrisches 6D-Trackingverfahren f\"ur den Einsatz in Virtuellen Studios},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      type     = {Diplomarbeit},
      year     = {1998},
      note     = {Betreuung: Prof. Dr. Armin B. Cremers, Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • A. Fischer, T. H. Kolbe, F. Lang, A. B. Cremers, W. Förstner, L. Plümer, and V. Steinhage, “Extracting Buildings from Aerial Images Using Hierarchical Aggregation in 2D and 3D,” in Computer Vision and Image Understanding, 1998. doi:10.1006/cviu.1998.0721
    [BibTeX] [PDF]

    We propose a model-based approach to automated 3D extraction of buildings from aerial images. We focus on a reconstruction strategy that is not restricted to a small class of buildings. Therefore, we employ a generic modeling approach which relies on the well-defined combination of building part models. Building parts are classified by their roof type. Starting from low-level image features we combine data-driven and model-driven processes within a multilevel aggregation hierarchy, thereby using a tight coupling of 2D image and 3D object modeling and processing, ending up in complex 3D building estimations of shape and location. Due to the explicit representation of well-defined processing states in terms of model-based 2D and 3D descriptions at all levels of modeling and data aggregation, our approach reveals a great potential for reliable building extraction.

    @Article{fischer1998extracting,
    title = {Extracting Buildings from Aerial Images Using Hierarchical Aggregation in 2D and 3D},
    author = {Fischer, Andr\'e and Kolbe, Thomas H. and Lang, Felicitas and Cremers, Armin B. and F\"orstner, Wolfgang and Pl\"umer, Lutz and Steinhage, Volker},
    journal = {Computer Vision and Image Understanding},
    year = {1998},
    abstract = {We propose a model-based approach to automated 3D extraction of buildings from aerial images. We focus on a reconstruction strategy that is not restricted to a small class of buildings. Therefore, we employ a generic modeling approach which relies on the well-defined combination of building part models. Building parts are classified by their roof type. Starting from low-level image features we combine data-driven and model-driven processes within a multilevel aggregation hierarchy, thereby using a tight coupling of 2D image and 3D object modeling and processing, ending up in complex 3D building estimations of shape and location. Due to the explicit representation of well-defined processing states in terms of model-based 2D and 3D descriptions at all levels of modeling and data aggregation, our approach reveals a great potential for reliable building extraction.},
    city = {Bonn},
    doi = {10.1006/cviu.1998.0721},
    proceeding = {Computer Vision and Image Understanding},
    url = {https://www.ipb.uni-bonn.de/pdfs/Fischer1998Extracting.pdf},
    }

  • E. Gülch and H. Müller, “Virtuelle Staedte aus digitalen Bildern,” in Proc. of Virtual GIS, Rostock, 1998.
    [BibTeX] [PDF]

    Der Übergang von der klassischen Photogrammetrie mit analogen Bildern zur Digitalen Photogrammetrie mit digitalen Bildern und der Automation von Messprozessen eröffnet vielfältige Möglichkeiten zur effizienten Erfassung von 3D-Information in urbanen Gebieten. Es ist ein zunehmender Bedarf an realen 3D-Daten für eine Vielfalt von Anwendungen zu beobachten. Diese reichen von Managementaufgaben in Stadtplanung und Städtebau bis hin zu Funknetzsimulationen, Werbung und Spieleindustrie. 3D-Stadtmodelle eignen sich nicht nur zur Analyse und Simulation, sondern auch zur computeranimierten Darstellung bis hin zur Virtuellen Realität. Das hier vorgestellte System wird zur geometrischen Modellierung aus Mono-, Stereo- oder mehrfachüberdeckenden Luftbildern eingesetzt. Verschiedene automatisierte Module unterstützen den Operateur. Neben einer detaillierten geometrischen Erfassung spielt die Visualisierung von Texturen eine immer größere Rolle. Mit dem hier vorgestellten System können orientierte Luft- und Nahbereichsaufnahmen gleichermaßen zur automatischen Texturierung von erfaßten 3D-Objektmodellen eingesetzt werden. Integraler Bestandteil ist eine Entzerrung und die Wahl der Texturelementgröße im Objektraum, womit verschiedene Detaillierungsgrade (Level-of-Detail) der virtuellen Darstellung erzeugt werden können.

    @inproceedings{gulch1998virtuelle,
      author     = {G\"ulch, Eberhard and M\"uller, Hardo},
      title      = {Virtuelle Staedte aus digitalen Bildern},
      booktitle  = {Proc. of Virtual GIS},
      address    = {Rostock},
      year       = {1998},
      abstract   = {Der \"Ubergang von der klassischen Photogrammetrie mit analogen Bildern zur Digitalen Photogrammetrie mit digitalen Bildern und der Automation von Messprozessen er\"offnet vielf\"altige M\"oglichkeiten zur effizienten Erfassung von 3D-Information in urbanen Gebieten. Es ist ein zunehmender Bedarf an realen 3D-Daten f\"ur eine Vielfalt von Anwendungen zu beobachten. Diese reichen von Managementaufgaben in Stadtplanung und St\"adtebau bis hin zu Funknetzsimulationen, Werbung und Spieleindustrie. 3D-Stadtmodelle eignen sich nicht nur zur Analyse und Simulation, sondern auch zur computeranimierten Darstellung bis hin zur Virtuellen Realit\"at. Das hier vorgestellte System wird zur geometrischen Modellierung aus Mono-, Stereo- oder mehrfach\"uberdeckenden Luftbildern eingesetzt. Verschiedene automatisierte Module unterst\"utzen den Operateur. Neben einer detaillierten geometrischen Erfassung spielt die Visualisierung von Texturen eine immer gr\"o{\ss}ere Rolle. Mit dem hier vorgestellten System k\"onnen orientierte Luft- und Nahbereichsaufnahmen gleicherma{\ss}en zur automatischen Texturierung von erfa{\ss}ten 3D-Objektmodellen eingesetzt werden. Integraler Bestandteil ist eine Entzerrung und die Wahl der Texturelementgr\"o{\ss}e im Objektraum, womit verschiedene Detaillierungsgrade (Level-of-Detail) der virtuellen Darstellung erzeugt werden k\"onnen.},
      city       = {Bonn},
      proceeding = {Proc. of Virtual GIS},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Gulch1998Virtuelle.pdf},
    }

  • E. Gülch, H. Müller, T. Läbe, and L. Ragia, “On the performance of semi-automatic building extraction,” in Proc. of ISPRS Commission III Symposium, Columbus, Ohio, 1998.
    [BibTeX] [PDF]

    A Semi-Automatic Building Extraction system using two or more digitized overlapping aerial images has been enhanced by increased automation for the measurement of saddle-back-roof buildings, hip-roof buildings and boxes. All newly developed modules have been incorporated in the object oriented design of the system. The new methods consist of a ground-point and roof-top matching tool and a robust determination of shape parameters, like e.g. gutter length and width. The current performance of building extraction is quantitatively and qualitatively evaluated. We examine the increased efficiency using the automated tools, the success rate of individual modules and the overall success rate using a combination of methods. A methodology for quantitative comparison is tested on footprints of buildings from classical stereo measurements and from semi-automatic measurements. A qualitative comparison in 3D of multiple measurements of complete buildings is performed on three different datasets.

    @inproceedings{gulch1998performance,
      author     = {G\"ulch, Eberhard and M\"uller, Hardo and L\"abe, Thomas and Ragia, Lemonia},
      title      = {On the performance of semi-automatic building extraction},
      booktitle  = {Proc. of ISPRS Commission III Symposium},
      address    = {Columbus, Ohio},
      year       = {1998},
      abstract   = {A Semi-Automatic Building Extraction system using two or more digitized overlapping aerial images has been enhanced by increased automation for the measurement of saddle-back-roof buildings, hip-roof buildings and boxes. All newly developed modules have been incorporated in the object oriented design of the system. The new methods consist of a ground-point and roof-top matching tool and a robust determination of shape parameters, like e.g. gutter length and width. The current performance of building extraction is quantitatively and qualitatively evaluated. We examine the increased efficiency using the automated tools, the success rate of individual modules and the overall success rate using a combination of methods. A methodology for quantitative comparison is tested on footprints of buildings from classical stereo measurements and from semi-automatic measurements. A qualitative comparison in 3D of multiple measurements of complete buildings is performed on three different datasets.},
      city       = {Bonn},
      proceeding = {Proc. of ISPRS Commission III Symposium},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Gulch1998performance.pdf},
    }

  • T. Hau, “Orientierung und Kalibrierung eines Rangesensors unter Nutzung geometrischer Objektinformation,” in 18. Wissenschaftlich- Technische Jahrestagung der Deutschen Gesellschaft für Photogrammetrie und Fernerkundung, Technische Universität München, 1998.
    [BibTeX] [PDF]

    In the field of reverse engineering the methods of stripe projection are predestined to record smooth homogeneous object surfaces. A disadvantage of these methods is the low level of accuracy. Photogrammetric techniques, on the other hand, offer highly accurate methods for orientation, calibration and object reconstruction. For a more efficient and accurate surface recording we propose a fusion of both techniques: photogrammetry and methods of stripe projection. This combination includes an automated orientation and calibration procedure for a stripe sensor which works according to the principle of coded light. The paper focusses on the automatic determination of approximate values and the structure of the calibration process.

    @InProceedings{hau1998orientierung,
    title = {Orientierung und Kalibrierung eines Rangesensors unter Nutzung geometrischer Objektinformation},
    author = {Hau, Thomas},
    booktitle = {18. Wissenschaftlich- Technische Jahrestagung der Deutschen Gesellschaft f\"ur Photogrammetrie und Fernerkundung},
    year = {1998},
    address = {Technische Universit\"at M\"unchen},
    abstract = {In the field of reverse engineering the methods of stripe projection are predestined to record smooth homogeneous object surfaces. A disadvantage of these methods is the low level of accuracy. Photogrammetric techniques, on the other hand, offer highly accurate methods for orientation, calibration and object reconstruction. For a more efficient and accurate surface recording we propose a fusion of both techniques: photogrammetry and methods of stripe projection. This combination includes an automated orientation and calibration procedure for a stripe sensor which works according to the principle of coded light. The paper focusses on the automatic determination of approximate values and the structure of the calibration process.},
    city = {Bonn},
    proceeding = {18. Wissenschaftlich- Technische Jahrestagung der Deutschen Gesellschaft f\"ur Photogrammetrie und Fernerkundung},
    url = {https://www.ipb.uni-bonn.de/pdfs/Hau1998Orientierung.pdf},
    }

  • H. Heinen, “Automatische Bestimmung der Kameralage von Legoland-Bildern,” Diplomarbeit Master Thesis, 1998.
    [BibTeX]
    [none]
    @mastersthesis{heinen1998automatische,
      author   = {Heinen, Holger},
      title    = {Automatische Bestimmung der Kameralage von Legoland-Bildern},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {1998},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Steffen Abraham},
      abstract = {[none]},
      city     = {Bonn},
    }

  • S. Heuel and W. Förstner, “A Dual, Scalable and Hierarchical Representation for Perceptual Organization of Binary Images,” in IEEE Workshop on Perceptual Organization in Computer Vision, Santa Barbara, 1998.
    [BibTeX] [PDF]

    We propose a new representation for segmented images useful for Perceptual Organization. The representation has four distinct properties: (1) It is topologically consistent,~i.e. the image plane is completely described; (2) the representation treats fore- and background symmetrically, a change of fore- and background has a well-defined and transparent impact on the representation; (3) the hierarchical structure of the representation explicitly reflects the aggregation of parts and objects; (4) finally the representation has an associated scale, which refers to the significance of image parts and of their relationships. We present an example for such a representation, where the images consist of area type features and the significance of the relationships of the blobs are based on their proximity.

    @inproceedings{heuel1998dual,
      author     = {Heuel, Stephan and F\"orstner, Wolfgang},
      title      = {A Dual, Scalable and Hierarchical Representation for Perceptual Organization of Binary Images},
      booktitle  = {IEEE Workshop on Perceptual Organization in Computer Vision},
      address    = {Santa Barbara},
      year       = {1998},
      abstract   = {We propose a new representation for segmented images useful for Perceptual Organization. The representation has four distinct properties: (1) It is topologically consistent,~i.e. the image plane is completely described; (2) the representation treats fore- and background symmetrically, a change of fore- and background has a well-defined and transparent impact on the representation; (3) the hierarchical structure of the representation explicitly reflects the aggregation of parts and objects; (4) finally the representation has an associated scale, which refers to the significance of image parts and of their relationships. We present an example for such a representation, where the images consist of area type features and the significance of the relationships of the blobs are based on their proximity.},
      city       = {Bonn},
      proceeding = {IEEE Workshop on Perceptual Organization in Computer Vision 1998},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Heuel1998Dual.pdf},
    }

  • B. Kastenholz, “Ein hierarchisches Konzept zur Artenidentifikation von Wildbienen durch morphologische Bildanalyse,” Diplomarbeit Master Thesis, 1998.
    [BibTeX]
    [none]
    @mastersthesis{kastenholz1998ein,
      author   = {Kastenholz, Bernd},
      title    = {Ein hierarchisches Konzept zur Artenidentifikation von Wildbienen durch morphologische Bildanalyse},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      type     = {Diplomarbeit},
      year     = {1998},
      note     = {Betreuung: Prof. Dr. Armin B. Cremers, Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • S. Kummer, “Qualitätsbewertung photogrammetrisch erfaßter Geodaten,” Diplomarbeit Master Thesis, 1998.
    [BibTeX]
    [none]
    @mastersthesis{kummer1998qualitatsbewertung,
      author   = {Kummer, S\"onke},
      title    = {Qualit\"atsbewertung photogrammetrisch erfa{\ss}ter Geodaten},
      school   = {Institute of Photogrammetry, University of Bonn},
      type     = {Diplomarbeit},
      year     = {1998},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dr.-Ing. Eberhard G\"ulch},
      abstract = {[none]},
      city     = {Bonn},
    }

  • T. Läbe and E. Gülch, “Robust Techniques for Estimating Parameters of 3D Building Primitives,” in Proc. of ISPRS Commission II Symposium, Cambridge, UK, 1998.
    [BibTeX] [PDF]

    A semi-automatic building extraction system using two or more digitized overlapping aerial images has been enhanced by increased automation for the measurement of saddleback-roof (lopsided and symmetric) buildings, hip-roof buildings and flat-roof building (boxes). The goal is to minimize the interaction an operator has to do for measuring the form and pose parameters of 3D building models of the above mentioned types. The automated tasks are computed on-line and fully integrated in the work flow. Thus accepting or correcting the results or adapting the automated calculation is possible. The used methods are grey value correlation for absolute heights and the robust estimation techniques RANSAC and Clustering for the determination of heights and the other form parameters of the building primitives. These methods work on automatically extracted line segments. The automated modules have been empirically evaluated on more than 250 buildings in two datasets with different image quality and different densities of built-up areas. The results of these tests show a success rate of up to 88% for a form parameter estimation module and the height measurement.

    @inproceedings{labe1998robust,
      author     = {L\"abe, Thomas and G\"ulch, Eberhard},
      title      = {Robust Techniques for Estimating Parameters of 3D Building Primitives},
      booktitle  = {Proc. of ISPRS Commission II Symposium},
      address    = {Cambridge, UK},
      year       = {1998},
      abstract   = {A semi-automatic building extraction system using two or more digitized overlapping aerial images has been enhanced by increased automation for the measurement of saddleback-roof (lopsided and symmetric) buildings, hip-roof buildings and flat-roof building (boxes). The goal is to minimize the interaction an operator has to do for measuring the form and pose parameters of 3D building models of the above mentioned types. The automated tasks are computed on-line and fully integrated in the work flow. Thus accepting or correcting the results or adapting the automated calculation is possible. The used methods are grey value correlation for absolute heights and the robust estimation techniques RANSAC and Clustering for the determination of heights and the other form parameters of the building primitives. These methods work on automatically extracted line segments. The automated modules have been empirically evaluated on more than 250 buildings in two datasets with different image quality and different densities of built-up areas. The results of these tests show a success rate of up to 88% for a form parameter estimation module and the height measurement.},
      city       = {Bonn},
      proceeding = {Proc. of ISPRS Commission II Symposium},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Labe1998robust.pdf},
    }

  • H. Müller, “Object-oriented modeling for the extraction of geometry, texture and reflectance from digital images,” in Proc. of ISPRS Commission IV Symposium, Stuttgart, Germany, 1998.
    [BibTeX] [PDF]

    A semi-automatic system for extracting topographic features is being migrated to an object-oriented design for better maintainability. For that purpose an object-oriented model of the extracted objects and the extraction methods is required. Moreover an appropriate software component model for the exchange with other systems is needed. We have modeled a class hierarchy for objects, that can be semi-automatically extracted from digital images. These objects are characterized by geometric, textural and reflectance properties. We have classified the extraction methods and modeled the message transfer of an interactive extraction method. The component technologies CORBA and JavaBeans (TM) were used to make the extracted objects and system components available for other systems. We found out that an access to the objects of the Semi-Automatic System by operation calls allows a more flexible data transfer and control of the system than standard file transfer. Therefore Geographic Information Systems (GIS) should support appropriate software component models to co-operate optimally with interactive or semi-automatic feature extraction systems.

    @inproceedings{muller1998object,
      author     = {M\"uller, Hardo},
      title      = {Object-oriented modeling for the extraction of geometry, texture and reflectance from digital images},
      booktitle  = {Proc. of ISPRS Commission IV Symposium},
      year       = {1998},
      address    = {Stuttgart, Germany},
      abstract   = {A semi-automatic system for extracting topographic features is being migrated to an object-oriented design for better maintainability. For that purpose an object-oriented model of the extracted objects and the extraction methods is required. Moreover an appropriate software component model for the exchange with other systems is needed. We have modeled a class hierarchy for objects, that can be semi-automatically extracted from digital images. These objects are characterized by geometric, textural and reflectance properties. We have classified the extraction methods and modeled the message transfer of an interactive extraction method. The component technologies CORBA and JavaBeans (TM) were used to make the extracted objects and system components available for other systems. We found out that an access to the objects of the Semi-Automatic System by operation calls allows a more flexible data transfer and control of the system than standard file transfer. Therefore Geographic Information Systems (GIS) should support appropriate software component models to co-operate optimally with interactive or semi-automatic feature extraction systems.},
      city       = {Bonn},
      proceeding = {Proc. of ISPRS Commission IV Symposium},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Muller1998Object.pdf},
    }

  • B. Moonen, “Plückerkoordinaten I: Die klassische Beschreibung,” in interne Veröffentlichung 1998, Bonn, Germany, 1998.
    [BibTeX]
    [none]
    @inproceedings{moonen1998pluckerkoordinaten,
      author     = {Moonen, Boudewijn},
      title      = {Pl\"uckerkoordinaten I: Die klassische Beschreibung},
      booktitle  = {interne Ver\"offentlichung 1998},
      year       = {1998},
      address    = {Bonn, Germany},
      abstract   = {[none]},
      city       = {Bonn},
      proceeding = {(interne Ver\"offentlichung 1998)},
    }

  • B. Steines, “Bewegungsschätzung aus Bildtripeln,” Diplomarbeit Master Thesis, 1998.
    [BibTeX]
    [none]
    @mastersthesis{steines1998bewegungsschatzung,
      author   = {Steines, Bernd},
      title    = {Bewegungssch\"atzung aus Bildtripeln},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1998},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Steffen Abraham},
      abstract = {[none]},
      city     = {Bonn},
    }

  • B. Waegli, “Investigations into the Noise Characteristics of Digitized Aerial Images,” in Proc. of ISPRS Commission II Symposium, Cambridge, UK, 1998.
    [BibTeX] [PDF]

    For automated image evaluation a precise characterization of the noise behaviour of the image data is needed. Digital image acquisition in aerial photogrammetry usually is performed by digitizing analogue film material. The noise of the digital image is composed of the noise due to the digitization process and of the noise of the analogue image. From a statistical point of view, noise can be described by the standard deviation and the correlation of the fluctuation of the intensity values. Our aim is to determine a noise model out of the images itself, without the need of special test patterns. We do this by following two approaches: pixelwise evaluation of multiple scans of the film material and regionwise evaluation from a single scan. The first approach analyses the noise coming from the digitization process, while in the second case we obtain the complete noise budget. We analyze the correlation between successive scans. First empirical investigations with different film material were performed and the results are presented.

    @inproceedings{waegli1998investigations,
      author     = {Waegli, Barbara},
      title      = {Investigations into the Noise Characteristics of Digitized Aerial Images},
      booktitle  = {Proc. of ISPRS Commission II Symposium},
      year       = {1998},
      address    = {Cambridge, UK},
      abstract   = {For automated image evaluation a precise characterization of the noise behaviour of the image data is needed. Digital image acquisition in aerial photogrammetry usually is performed by digitizing analogue film material. The noise of the digital image is composed of the noise due to the digitization process and of the noise of the analogue image. From a statistical point of view, noise can be described by the standard deviation and the correlation of the fluctuation of the intensity values. Our aim is to determine a noise model out of the images itself, without the need of special test patterns. We do this by following two approaches: pixelwise evaluation of multiple scans of the film material and regionwise evaluation from a single scan. The first approach analyses the noise coming from the digitization process, while in the second case we obtain the complete noise budget. We analyze the correlation between successive scans. First empirical investigations with different film material were performed and the results are presented.},
      city       = {Bonn},
      proceeding = {Proc. of ISPRS Commission II Symposium},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Waegli1998Investigations.pdf},
    }

  • S. Winter and L. Ragia, “Contributions to a Quality Description of Areal Objects in Spatial Data Sets,” in Proc. of ISPRS Commission IV Symposium, Stuttgart, Germany, 1998.
    [BibTeX] [PDF]

    In this paper we present a quality evaluation of two-dimensional building acquisition. We propose methods for identification and quantification of differences between independently acquired regions, and we present a systematic classification of those differences. Differences between acquired sets Rj = rij of regions rij depend on the context of observation, on the technique of observation, and so on. We distinguish topological and geometrical differences. Topological differences refer to the interior structure of a set of regions as well as to the structure of the boundary of a single region. Geometrical differences refer to the location of the boundary of a single region or of a set of regions, independent of their representation and of the structure of the boundaries. Identification of differences requires a matching of two data sets R1 and R2 which is done here by weighted topological relationships. For the identification of topological differences between two sets R1 and R2 of regions we use the two region adjacency graphs. For an identification of geometrical differences we use the zone skeleton between two matched subsets rp1 and rq2 of the given sets. The zone skeleton is labeled with the local distances of the corresponding boundaries of the subsets; especially we investigate its density function. An example, based on two real data sets of acquired ground plans of buildings, shows the feasibility of the approach.

    @inproceedings{winter1998contributions,
      author     = {Winter, Stephan and Ragia, Lemonia},
      title      = {Contributions to a Quality Description of Areal Objects in Spatial Data Sets},
      booktitle  = {Proc. of ISPRS Commission IV Symposium},
      year       = {1998},
      address    = {Stuttgart, Germany},
      abstract   = {In this paper we present a quality evaluation of two-dimensional building acquisition. We propose methods for identification and quantification of differences between independently acquired regions, and we present a systematic classification of those differences. Differences between acquired sets Rj = rij of regions rij depend on the context of observation, on the technique of observation, and so on. We distinguish topological and geometrical differences. Topological differences refer to the interior structure of a set of regions as well as to the structure of the boundary of a single region. Geometrical differences refer to the location of the boundary of a single region or of a set of regions, independent of their representation and of the structure of the boundaries. Identification of differences requires a matching of two data sets R1 and R2 which is done here by weighted topological relationships. For the identification of topological differences between two sets R1 and R2 of regions we use the two region adjacency graphs. For an identification of geometrical differences we use the zone skeleton between two matched subsets rp1 and rq2 of the given sets. The zone skeleton is labeled with the local distances of the corresponding boundaries of the subsets; especially we investigate its density function. An example, based on two real data sets of acquired ground plans of buildings, shows the feasibility of the approach.},
      city       = {Bonn},
      proceeding = {Proc. of ISPRS Commission IV Symposium},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Winter1998Contributions.pdf},
    }

1997

  • S. Abraham and W. Förstner, “Zur automatischen Modellwahl bei der Kalibrierung von CCD-Kameras,” in Proceedings: 19. DAGM-Symposium Mustererkennung, 1997, p. 147–155. doi:10.1007/978-3-642-60893-3_14
    [BibTeX] [PDF]

    Wir diskutieren zwei Kriterien zur Bewertung verschiedener Abbildungsmodelle im Rahmen der Kalibrierung einer Kamera. Die Beschreibungslänge des Datensatzes und die Stabilität/Präzision der 3D–Rekonstruktion in Abhängigkeit vom verwendeten Modell erlauben eine automatische Wahl aus einer Menge vorhandener Modelle. Am Beispiel der Off–Line Selbstkalibrierung mit verschiedenen Modellen zur Beschreibung der inneren Orientierung der Kamera demonstrieren wir diese Verfahren.

    @inproceedings{abraham1997zur,
      author     = {Abraham, Steffen and F\"orstner, Wolfgang},
      title      = {Zur automatischen Modellwahl bei der Kalibrierung von {CCD}-Kameras},
      booktitle  = {Proceedings: 19. DAGM-Symposium Mustererkennung},
      year       = {1997},
      pages      = {147--155},
      doi        = {10.1007/978-3-642-60893-3_14},
      abstract   = {Wir diskutieren zwei Kriterien zur Bewertung verschiedener Abbildungsmodelle im Rahmen der Kalibrierung einer Kamera. Die Beschreibungsl\"ange des Datensatzes und die Stabilit\"at/Pr\"azision der 3D--Rekonstruktion in Abh\"angigkeit vom verwendeten Modell erlauben eine automatische Wahl aus einer Menge vorhandener Modelle. Am Beispiel der Off--Line Selbstkalibrierung mit verschiedenen Modellen zur Beschreibung der inneren Orientierung der Kamera demonstrieren wir diese Verfahren.},
      city       = {Bonn},
      proceeding = {Proceedings: 19. DAGM-Symposium Mustererkennung},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Abraham1997Zur.pdf},
    }

  • S. Abraham and T. Hau, “Towards Autonomous High-Precision Calibration of Digital Cameras,” in Proc. of SPIE Annual Meeting, San Diego, 1997, p. 82–93. doi:10.1117/12.279802
    [BibTeX] [PDF]

    The demand for high accuracy, for example in 3D measurements or in quality control, requires high precision calibration of CCD cameras. In this paper we present a test-field-based high-precision calibration procedure with the focus on the techniques which enable an autonomous calibration: automatic control of the imaging process with geometric tests, algorithms for automatic point detection and matching, automatic determination of approximate values for the orientation parameters and automatic model selection of the interior orientation parameters. Finally, the parameters are estimated in an iterative least squares adjustment.

    @inproceedings{abraham1997towards,
      author     = {Abraham, Steffen and Hau, Thomas},
      title      = {Towards Autonomous High-Precision Calibration of Digital Cameras},
      booktitle  = {Proc. of SPIE Annual Meeting},
      year       = {1997},
      address    = {San Diego},
      pages      = {82--93},
      doi        = {10.1117/12.279802},
      abstract   = {The demand for high accuracy, for example in 3D measurements or in quality control, requires high precision calibration of CCD cameras. In this paper we present a test-field-based high-precision calibration procedure with the focus on the techniques which enable an autonomous calibration: automatic control of the imaging process with geometric tests, algorithms for automatic point detection and matching, automatic determination of approximate values for the orientation parameters and automatic model selection of the interior orientation parameters. Finally, the parameters are estimated in an iterative least squares adjustment.},
      city       = {Bonn},
      proceeding = {Proc. of SPIE Annual Meeting},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Abraham1997Towards.pdf},
    }

  • M. Bank, “Entwicklung eines Merkmalszuordnungsverfahrens zur Orientierung von Bildern künstlicher Objekte,” Diplomarbeit Master Thesis, 1997.
    [BibTeX]
    [none]
    @mastersthesis{bank1997entwicklung,
      author   = {Bank, Martin},
      title    = {Entwicklung eines Merkmalszuordnungsverfahrens zur Orientierung von Bildern k\"unstlicher Objekte},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1997},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Thomas Hau},
      abstract = {[none]},
      city     = {Bonn},
    }

  • A. Brunn and U. Weidner, “Extracting Buildings from Digital Surface Models,” in IAPRS: 3D Reconstruction and Modeling of Topographic Objects, Stuttgart, 1997. doi:10.1016/S0924-2716(98)00012-4
    [BibTeX] [PDF]

    This paper describes an approach for building extraction using Digital Surface Models (DSM) as input data. The first task is the detection of areas within the DSM which describe buildings. The second task is the reconstruction of buildings for which we apply parametric and prismatic building models. The main focus is on the detection, namely on the use of height and differential geometric information in combination. Furthermore, recent results for the extraction of roof structures as first step towards the extraction of polyhedral building descriptions are presented.

    @inproceedings{brunn1997extracting,
      author     = {Brunn, Ansgar and Weidner, Uwe},
      title      = {Extracting Buildings from Digital Surface Models},
      booktitle  = {IAPRS: 3D Reconstruction and Modeling of Topographic Objects},
      year       = {1997},
      address    = {Stuttgart},
      doi        = {10.1016/S0924-2716(98)00012-4},
      abstract   = {This paper describes an approach for building extraction using Digital Surface Models (DSM) as input data. The first task is the detection of areas within the DSM which describe buildings. The second task is the reconstruction of buildings for which we apply parametric and prismatic building models. The main focus is on the detection, namely on the use of height and differential geometric information in combination. Furthermore, recent results for the extraction of roof structures as first step towards the extraction of polyhedral building descriptions are presented.},
      city       = {Bonn},
      proceeding = {IAPRS: 3D Reconstruction and Modeling of Topographic Objects},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Brunn1997Extracting.pdf},
    }

  • T. Busch, “Zur Eignung eines CAD-Systems für die Erfassung, Verwaltung, Analyse und Präsentation von Geometrie- und Sachdaten,” Diplomarbeit Master Thesis, 1997.
    [BibTeX]
    [none]
    @mastersthesis{busch1997zur,
      author   = {Busch, Thomas},
      title    = {Zur Eignung eines {CAD}-Systems f\"ur die Erfassung, Verwaltung, Analyse und Pr\"asentation von Geometrie- und Sachdaten},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1997},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dr.-Ing. Stephan Winter},
      abstract = {[none]},
      city     = {Bonn},
    }

  • C. Droste, “Uncertainty in Parameter Estimation for Nonlinear Dynamical Models,” PhD Thesis, 1997.
    [BibTeX]

    Mathematical process modeling is one of the most important tools in geophysical sciences, since’ it allows quantitative prediction and the establishment of relations to measurements of real objects. Although there exist other kinds of models like classifications or qualitative descriptions allowing essential insights into the interdependencies between complex processes in applied sciences, especially geosciences, they lack in both, predictability and falsifiability. The quality of such models may only be evaluated by specialists, and since these specialists involve their personal a priori knowledge, the assessment is always subjective. The results of mathematical process models, however, are directly comparable to measurements which allow rejection or acceptance of the same, depending on the comparison between model predictions and observations. However, these methods of comparison are far from being unified or generally accepted themselves. This thesis focuses on models for nonlinear dynamical systems. One of the most important features of dynamical systems that is essential for the comparison of predictions and measurements has only recently been discussed in a broader scientific community, i.e. the inherent resistance of many dynamical processes to be predictable into the far future. This is commonly known as ‘butterfly effect’, which is a circumscription for the sensitivity of predictions to the initial conditions. It should be noted that the measures for that sensitivity can not only be applied to the so called chaotic systems but also to systems with non chaotic determinism. Actually for finite prediction times the difference is only gradual. Obviously, it is important to gather knowledge about this sensitivity to be able to assess the differences between model predictions and observations. This topic is closely related to uncertainty and error propagation for linear models, which is well elaborated. 
The connection of both theories in context of parameter estimation shall be established by this thesis. Often the elementary processes that govern the behavior of a complex geophysical system are well known, but the numerical application of corresponding fundamental laws fails, because the entities to which the laws could be applied are too numerous. On the one hand computational costs rise tremendously with increasing degrees of freedom, on the other hand it is not possible to measure each level of structural complexity, e.g. each grain of the sand, not to speak of atoms. Most geophysical processes have more degrees of freedom than practically treatable. Hence, it is necessary to describe those systems at another level. This is well known from physics: e.g. a fluid is not treated as a collection of particles but as a continuous medium, and a thermodynamic system is not described by the velocities of individual molecules but by temperatures, pressures and volumes. Be it regarded as refined or coarser level of modeling, the obvious result is a regularization, because the phase space, i.e. the space of possible states the system can take, is reduced in dimensions. Since the laws on this level generally cannot be derived from the known fundamental laws, new parameters come into the description, which are linked in an appropriate way to the properties of matter. The values of those parameters cannot be derived theoretically, because the solution of the high dimensional system would be the condition. Their values have to be determined by observations. Although some of them may be of general interest and are fixed by means independent of the model, almost every model contains some parameters that are only meaningful in the context of that specific model. These parameters cannot be determined by other means than by observations predictable by the specific model. In other words the model needs to be calibrated. 
Fitting the parameters of a model to observations is a problem of optimization. The most common approach to this is to minimize the positively weighted sum of squares of the differences between observations and model predictions. In linear models this problem is directly solvable. For other criteria or nonlinear models more costly methods must be applied, i.e. for instance iterative procedures like the multidimensional gradient methods. If one runs the risk of missing the correct optimum even more elaborate algorithms are available, e.g. simulated an-nealing or evolutionary algorithms. However, all optimization algorithms just act in accordance to the chosen extremal principle but do not refer to the structure of deviations generated by the structure of the system. In dynamical systems future states are predicted by means of the present state. Hence, the present errors propagate in time and influence negatively the reliability of predictions. The propagation laws for errors corresponding directly to structures of the dynamical model are widely ignored for parameter estimation in applied science. Often valuable information is wasted, because the application of optimization algorithms is standardized and does not take into account the model structure. The observed deficiencies in compatibility of models and the measurements of their corre-sponding dynamical systems take place due to – the presence of dynamical noise, caused by un modeled processes, – the limited level of accuracy for measurements and – numerical errors In many cases, especially the linear ones, there is a wide acceptance about how to deal with these errors and how to interpret the results. However, the vast majority of models in applied sciences describe processes that have high nonlinear complexity, that can only be observed once or show inherent features of chaos. 
    In these cases there is no general agreement about the criteria required for the decision if a prediction is confirmed by a measurement, since the potential behavior is principally unknown. The uncertainty about the potential behavior of systems is the main reason for disagreements about the validity of models and parameters, respectively. It cannot be principally eliminated, because the true dynamics of the system is unknown. Nevertheless, it is at least possible to exploit the structure of the corresponding model, and where possible reduce the level of uncertainty by choosing appropriate conditions for parameter estimation. This is the main topic of this thesis. The deviations of model predictions are predictable themselves, when the origin of occurring errors and the structure of the model is known. Methods for this possibility shall be developed to assess the value of individual measurements for parameter estimation. In laboratory experiments one is able to influence the initial conditions. Therefore we will seek to predict, which initial conditions are most suitable for parameter estimation, before the experiment is actually run. The stochastic response on the noise generated from the unmodeled environment as well as the tendency to expand or shrink measurement errors are properties of the system. Therefore, no generic improvement for parameter estimation can be given independent of the model structure. However, since we are given a mathematical model and therefore some structural information is known explicitly, this information can be exploited to reduce the harmful effects of unprofitable error propagation. In laboratory experiments, this may be used to select the most suitable initial conditions and in field studies it may at least help to select the most effective measurements. This thesis shall develop the main ideas in a fairly general context and illustrate the results for low dimensional, well known, nonlinear dynamical systems.
    However, the concept shall be transferred to a specific problem in geophysics (geomorphology) in order to show the applicability of the developed methods. Therefore, the entire model for a geomorphological problem has been developed, which deals with the migration of meandering rivers. This model is used to transfer as much as possible from the theoretical results. The detailed insight into the model helps to assess the different problems that appear during application of the developed theory. The topic of this thesis is extensively influenced by an interdisciplinary research project that is dedicated to interactions between and modeling of continental geosystems. Within this project a large variety of disciplines attempts to develop common concepts for the description of ‘geophysical’ processes on different time and space scales. The work in this collaborative research center is highly determined by the discrepancy between the exact and the holistic points of view. Exact mathematical descriptions are often too simple to reflect the whole complexity of natural processes, whereas the qualitative descriptions are not verifiable in a mathematical sense. This thesis touches overlapping aspects of both, and hopefully acts as a mediator.

    @PhDThesis{droste1997uncertainty,
    title = {Uncertainty in Parameter Estimation for Nonlinear Dynamical Models},
    author = {Droste, Christoph},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1997},
    abstract = {Mathematical process modeling is one of the most important tools in geophysical sciences, since' it allows quantitative prediction and the establishment of relations to measurements of real objects. Although there exist other kinds of models like classifications or qualitative descriptions allowing essential insights into the interdependencies between complex processes in applied sciences, especially geosciences, they lack in both, predictability and falsifiability. The quality of such models may only be evaluated by specialists, and since these specialists involve their personal a priori knowledge, the assessment is always subjective. The results of mathematical process models, however, are directly comparable to measurements which allow rejection or acceptance of the same, depending on the comparison between model predictions and observations. However, these methods of comparison are far from being unified or generally accepted themselves. This thesis focuses on models for nonlinear dynamical systems. One of the most important features of dynamical systems that is essential for the comparison of predictions and measurements has only recently been discussed in a broader scientific community, i.e. the inherent resistance of many dynamical processes to be predictable into the far future. This is commonly known as 'butterfly effect', which is a circumscription for the sensitivity of predictions to the initial conditions. It should be noted that the measures for that sensitivity can not only be applied to the so called chaotic systems but also to systems with non chaotic determinism. Actually for finite prediction times the difference is only gradual. Obviously, it is important to gather knowledge about this sensitivity to be able to assess the differences between model predictions and observations. This topic is closely related to uncertainty and error propagation for linear models, which is well elaborated. 
The connection of both theories in context of parameter estimation shall be established by this thesis. Often the elementary processes that govern the behavior of a complex geophysical system are well known, but the numerical application of corresponding fundamental laws fails, because the entities to which the laws could be applied are too numerous. On the one hand computational costs rise tremendously with increasing degrees of freedom, on the other hand it is not possible to measure each level of structural complexity, e.g. each grain of the sand, not to speak of atoms. Most geophysical processes have more degrees of freedom than practically treatable. Hence, it is necessary to describe those systems at another level. This is well known from physics: e.g. a fluid is not treated as a collection of particles but as a continuous medium, and a thermodynamic system is not described by the velocities of individual molecules but by temperatures, pressures and volumes. Be it regarded as refined or coarser level of modeling, the
    obvious result is a regularization, because the phase space, i.e. the space of possible states the system can take, is reduced in dimensions. Since the laws on this level generally cannot be derived from the known fundamental laws, new parameters come into the description, which are linked in an appropriate way to the properties of matter. The values of those parameters cannot be derived theoretically, because the solution of the high dimensional system would be the condition. Their values have to be determined by observations. Although some of them may be of general interest and are fixed by means independent of the model, almost every model contains some parameters that are only meaningful in the context of that specific model. These parameters cannot be determined by other means than by observations predictable by the specific model. In other words the model needs to be calibrated. Fitting the parameters of a model to observations is a problem of optimization. The most common approach to this is to minimize the positively weighted sum of squares of the differences between observations and model predictions. In linear models this problem is directly solvable. For other criteria or nonlinear models more costly methods must be applied, i.e. for instance iterative procedures like the multidimensional gradient methods. If one runs the risk of missing the correct optimum even more elaborate algorithms are available, e.g. simulated an-nealing or evolutionary algorithms. However, all optimization algorithms just act in accordance to the chosen extremal principle but do not refer to the structure of deviations generated by the structure of the system. In dynamical systems future states are predicted by means of the present state. Hence, the present errors propagate in time and influence negatively the reliability of predictions. 
The propagation laws for errors corresponding directly to structures of the dynamical model are widely ignored for parameter estimation in applied science. Often valuable information is wasted, because the application of optimization algorithms is standardized and does not take into account the model structure. The observed deficiencies in compatibility of models and the measurements of their corre-sponding dynamical systems take place due to - the presence of dynamical noise, caused by un modeled processes, - the limited level of accuracy for measurements and - numerical errors In many cases, especially the linear ones, there is a wide acceptance about how to deal with these errors and how to interpret the results. However, the vast majority of models in applied sciences describe processes that have high nonlinear complexity, that can only be observed once or show inherent features of chaos. In these cases there is no general agreement about the criteria required for the decision if a prediction is confirmed by a measurement, since the potential behavior is principally unknown. The Uncertainty about the
    potential behavior of systems is the main reason for disagreements about the validity of models and parameters, respectively. It cannot be principally eliminated, because the true dynamics of the system is unknown. Nevertheless, it is at least possible to exploit the structure of the corresponding model, and where possible reduce the level of uncertainty by choosing appropriate conditions for parameter estimation. This is the main topic of this thesis. The deviations of model predictions are predictable themselves, when the origin of occuring errors and the structure of the model is known. Methods for this possibility shall be developed to assess the value of individual measurements for parameter estimation. In laboratory experiments one is able to influence the initial conditions. Therefore we will seek to predict, which initial conditions are most suitable for parameter estimation, before the experiment is actually run. The stochastic response on the noise generated from the unmodeled environment as well as the tendency to expand or shrink measurement errors are properties of the system. Therefore, no generic improvement for parameter estimation can be given independent of the model structure. However, since we are given a mathematical model and therefore some structural information is known explicitly, this information can be exploited to reduce the harmful effects of unprofitable error propagation. In laboratory experiments, this may be used to select the most suitable initial conditions and in field studies it may at least help to select the most effective measurements. This thesis shall develop the main ideas in a fairly general context and illustrate the results for low dimensional, well known, nonlinear dynamical systems. However, the concept shall be transfered to a specific problem in geophysics (geomorphology) in order to show the applicability of the developed methods. 
Therefore, the entire model for a geomorphological problem has been developed, which deals with the migration of meandering rivers. This model is used to transfer as much as possible from the theoretical results. The detailed insight into the model helps to assess the different problems that appear during application of the developed theory. The topic of this thesis is extensively influenced by an interdisciplinary research project that is dedicated to interactions between and modeling of continental geosystems. Within this project a large variety of disciplines attempts to develop common concepts for the description of 'geophysical' processes on different time and space scales. The work in this collaborate research center is highly determined by the discrepancy between the exact and the holistic points of view. Exact mathematical descriptions are often too simple to reflect the whole complexity of natural processes, whereas the qualitative descriptions are not verifiable in a mathematical sense. This thesis touches overlapping aspects of both, and
    hopefully acts as a mediator.},
    }

  • H. Ernst, “Photogrammetric Mensuration of Water Surfaces,” Diplomarbeit Master Thesis, 1997.
    [BibTeX]
    [none]
    @mastersthesis{ernst1997photogrammetric,
    title = {Photogrammetric Mensuration of Water Surfaces},
    author = {Ernst, Heike},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Department of Surveying and Mapping der Norwegian University of Science and Technology in Trondheim},
    year = {1997},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Ingolf Hadem},
    type = {Diplomarbeit},
    city = {Bonn},
    }

  • W. Förstner and E. Gülch, “Automatic Orientation and Recognition in Highly Structured Scenes,” in Proc. of SPIE Annual Meeting, San Diego, 1997. doi:10.1016/S0924-2716(98)00022-7
    [BibTeX] [PDF]

    The paper discusses the impact of scene and assessment models for videometry. Full automation of calibration and orientation procedures appears to be as necessary for enlarging the field of applications as the use of explicit geometric and semantic scene knowledge. The focus on achieving highest possible accuracy needs to be embedded into a broader context of scene analysis. Examples demonstrate the feasibility of tools from Computer Vision for image metrology.

    @inproceedings{forstner1997automatic,
    title = {Automatic Orientation and Recognition in Highly Structured Scenes},
    author = {F\"orstner, Wolfgang and G\"ulch, Eberhard},
    booktitle = {Proc. of {SPIE} Annual Meeting},
    year = {1997},
    address = {San Diego},
    abstract = {The paper discusses the impact of scene and assessment models for videometry. Full automation of calibration and orientation procedures appears to be as necessary for enlarging the field of applications as the use of explicit geometric and semantic scene knowledge. The focus on achieving highest possible accuracy needs to be embedded into a broader context of scene analysis. Examples demonstrate the feasibility of tools from Computer Vision for image metrology.},
    city = {Bonn},
    doi = {10.1016/S0924-2716(98)00022-7},
    proceeding = {Proc. of SPIE Annual Meeting 1997 (to appear)},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1997Automatic.pdf},
    }

  • A. Faber, “Segmentierung und Klassifikation orthogonaler Straßennetze mittels Richtungscodierung,” Institute of Photogrammetry, University of Bonn 1997.
    [BibTeX] [PDF]

    Die Struktur einer Stadt, wie sie sich vom Satelliten oder von hochfliegenden Flugzeugen aus darstellt, wird hauptsächlich durch drei grundlegende Elemente geprägt: – das Straßennetz, – die Morphologie der Baulichen Nutzung und – die Verteilung der Vegetation. Das Straßennetz fällt in Satellitenbildern wegen der besonderen geometrischen Anordnung als künstliches Objekt unmittelbar auf und läßt sich, zumindest partiell, mit nicht zu komplexen Bildverarbeitungsverfahren erfassen. Die bauliche Nutzung, vor allem Wohnhäuser und Industrieanlagen, füllt die zwischen den Straßen aufgespannten Baublöcke, im Inneren von Städten fast vollständig am Rand partiell. In kleinmaßstäbigen Bildern sind die Gebäude schwierig zu erkennen und fast nicht zu rekonstruieren. Die Komplementarität von Straßennetz und baulicher Nutzung ist deutlich aus den in Abb. 1 auf der nächsten Seite dargestellten Grundformen der Siedlungsstruktur zu erkennen ist. Beide Elemente werden zusatzlich durch die Vegetation überlagert. Breite Straßen sind oft durch Alleen begrenzt, Plätze enthalten häufig kreis- oder rechteckig angeordnete Baumgruppen. Dagegen ist die Vegetation in Vorstädten außerordentlich vielfältig und unregelmäßig angeordnet. Liegen Farb- oder Multispektralaufnahmen vor, kann die Vegetation vergleichsweise leicht von den beiden anderen Elementen getrennt werden.

    @techreport{faber1997segmentierung,
      author      = {Faber, Anette},
      title       = {Segmentierung und Klassifikation orthogonaler Stra{\ss}ennetze mittels Richtungscodierung},
      institution = {Institute of Photogrammetry, University of Bonn},
      year        = {1997},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Faber1997Segmentierung.pdf},
      abstract    = {Die Struktur einer Stadt, wie sie sich vom Satelliten oder von hochfliegenden Flugzeugen aus darstellt, wird haupts\"achlich durch drei grundlegende Elemente gepr\"agt: - das Stra{\ss}ennetz, - die Morphologie der Baulichen Nutzung und - die Verteilung der Vegetation. Das Stra{\ss}ennetz f\"allt in Satellitenbildern wegen der besonderen geometrischen Anordnung als k\"unstliches Objekt unmittelbar auf und l\"a{\ss}t sich, zumindest partiell, mit nicht zu komplexen Bildverarbeitungsverfahren erfassen. Die bauliche Nutzung, vor allem Wohnh\"auser und Industrieanlagen, f\"ullt die zwischen den Stra{\ss}en aufgespannten Baubl\"ocke, im Inneren von St\"adten fast vollst\"andig am Rand partiell. In kleinma{\ss}st\"abigen Bildern sind die Geb\"aude schwierig zu erkennen und fast nicht zu rekonstruieren. Die Komplementarit\"at von Stra{\ss}ennetz und baulicher Nutzung ist deutlich aus den in Abb. 1 auf der n\"achsten Seite dargestellten Grundformen der Siedlungsstruktur zu erkennen ist. Beide Elemente werden zusatzlich durch die Vegetation \"uberlagert. Breite Stra{\ss}en sind oft durch Alleen begrenzt, Pl\"atze enthalten h\"aufig kreis- oder rechteckig angeordnete Baumgruppen. Dagegen ist die Vegetation in Vorst\"adten au{\ss}erordentlich vielf\"altig und unregelm\"a{\ss}ig angeordnet. Liegen Farb- oder Multispektralaufnahmen vor, kann die Vegetation vergleichsweise leicht von den beiden anderen Elementen getrennt werden.},
    }

  • A. Fischer, T. H. Kolbe, and F. Lang, “Integration of 2D and 3D Reasoning for Building Reconstruction using a Generic Hierarchical Model,” in Workshop on Semantic Modeling for the Acquisition of Topographic Information from Images and Maps SMATI, Bonn, Germany, 1997.
    [BibTeX] [PDF]

    We propose a model-based approach to automated 3D extraction of buildings from aerial images. The semantics of the concept building is used to control and to evaluate building extraction in all stages of the process. It is encoded by means of generic 3D object modeling, which describes thematic and geometric constraints on the spatial appearences of buildings, and 2D image modeling, which integrates sensor and illumination modeling to describe the projective appearences of buildings, specific for the given aerial imagery. 3D object and 2D image modeling are tightly coupled within a multi-layered framework which reveals a is-part-of -hierachy of 3D building parts and their corresponding projective descriptions. The overall strategy follows the paradigm of hypotheses generation and verification and combines bottom-up (data-driven) and top-down (model-driven) processes. Due to the explicit representation of well defined processing states in terms of model-based 2D and 3D descriptions at all levels of modeling and data aggregation our approach reveals a great potential for a reliable building extraction.

    @inproceedings{fischer1997integration,
    title = {Integration of {2D} and {3D} Reasoning for Building Reconstruction using a Generic Hierarchical Model},
    author = {Fischer, Andr\'e and Kolbe, Thomas H. and Lang, Felicitas},
    booktitle = {Workshop on Semantic Modeling for the Acquisition of Topographic Information from Images and Maps {SMATI}},
    year = {1997},
    address = {Bonn, Germany},
    abstract = {We propose a model-based approach to automated 3D extraction of buildings from aerial images. The semantics of the concept building is used to control and to evaluate building extraction in all stages of the process. It is encoded by means of generic 3D object modeling, which describes thematic and geometric constraints on the spatial appearences of buildings, and 2D image modeling, which integrates sensor and illumination modeling to describe the projective appearences of buildings, specific for the given aerial imagery. 3D object and 2D image modeling are tightly coupled within a multi-layered framework which reveals a is-part-of -hierachy of 3D building parts and their corresponding projective descriptions. The overall strategy follows the paradigm of hypotheses generation and verification and combines bottom-up (data-driven) and top-down (model-driven) processes. Due to the explicit representation of well defined processing states in terms of model-based 2D and 3D descriptions at all levels of modeling and data aggregation our approach reveals a great potential for a reliable building extraction.},
    city = {Bonn},
    proceeding = {Workshop on Semantic Modeling for the Acquisition of Topographic Information from Images and Maps SMATI},
    url = {https://www.ipb.uni-bonn.de/pdfs/Fischer1997Integration.pdf},
    }

  • E. Gülch, “Application of Semi-Automatic Building Acquisition,” in Proc. of Ascona Workshop ‘Automatic Extraction of Man-Made Objects from Aerial and Space Images’, 1997. doi:10.1007/978-3-0348-8906-3_13
    [BibTeX] [PDF]

    There is an increasing request for 3D data on city objects of all kinds, confirmed by a recent European wide study on 3D city models. To acquire 3D information in urban areas still is costly, only automated or at least semi-automatic methods appear feasible in the long run to reach the cost-effectiveness, necessary for a broad application. The variety of tasks and available sensor data is very large, which puts high requirements on the design of methods and the flexibility of the acquisition process. This paper discusses the requirements we have encountered so far. We present the design and current status of a semi-automatic system for 3D building acquisition. We demonstrate the potential for handling a variety of applications, using different sensor data under different initial conditions.

    @inproceedings{gulch1997application,
      author     = {G\"ulch, Eberhard},
      title      = {Application of Semi-Automatic Building Acquisition},
      booktitle  = {Proc. of Ascona Workshop 'Automatic Extraction of Man-Made Objects from Aerial and Space Images'},
      year       = {1997},
      doi        = {10.1007/978-3-0348-8906-3_13},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Gulch1997Application.pdf},
      city       = {Bonn},
      proceeding = {Proc. of Ascona Workshop 1997 Automatic Extraction of Man-Made Objects from Aerial and Space Images (to appear)},
      abstract   = {There is an increasing request for 3D data on city objects of all kinds, confirmed by a recent European wide study on 3D city models. To acquire 3D information in urban areas still is costly, only automated or at least semi-automatic methods appear feasible in the long run to reach the cost-effectiveness, necessary for a broad application. The variety of tasks and available sensor data is very large, which puts high requirements on the design of methods and the flexibility of the acquisition process. This paper discusses the requirements we have encountered so far. We present the design and current status of a semi-automatic system for 3D building acquisition. We demonstrate the potential for handling a variety of applications, using different sensor data under different initial conditions.},
    }

  • E. Gülch and H. Müller, “Object-oriented software design in semiautomatic building extraction,” in Proc. Integrating Photogrammetric Techniques with Scene Analysis and Machine Vision III, Orlando, USA, 1997. doi:10.1117/12.281043
    [BibTeX] [PDF]

    Developing a system for semiautomatic building acquisition is a complex process, that requires constant integration and updating of software modules and user interfaces. To facilitate these processes we apply an object-oriented design not only for the data but also for the software involved. We use the Unified Modeling Language (UML) to describe the object-oriented modeling of the system in different levels of detail. We can distinguish between use cases from the users point of view, that represent a sequence of actions, yielding in an observable result and the use cases for the programmers, who can use the system as a class library to integrate the acquisition modules in their own software. The structure of the system is based on the Model-View-Controller (MVC) design pattern. An example from the integration of automated texture extraction for the visualization of results demonstrate the feasibility of this approach.

    @inproceedings{gulch1997object,
      author     = {G\"ulch, Eberhard and M\"uller, Hardo},
      title      = {Object-oriented software design in semiautomatic building extraction},
      booktitle  = {Proc. Integrating Photogrammetric Techniques with Scene Analysis and Machine Vision III},
      year       = {1997},
      address    = {Orlando, USA},
      doi        = {10.1117/12.281043},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Gulch1997Object.pdf},
      city       = {Bonn},
      proceeding = {Proc. Integrating Photogrammetric Techniques with Scene Analysis and Machine Vision III},
      abstract   = {Developing a system for semiautomatic building acquisition is a complex process, that requires constant integration and updating of software modules and user interfaces. To facilitate these processes we apply an object-oriented design not only for the data but also for the software involved. We use the Unified Modeling Language (UML) to describe the object-oriented modeling of the system in different levels of detail. We can distinguish between use cases from the users point of view, that represent a sequence of actions, yielding in an observable result and the use cases for the programmers, who can use the system as a class library to integrate the acquisition modules in their own software. The structure of the system is based on the Model-View-Controller (MVC) design pattern. An example from the integration of automated texture extraction for the visualization of results demonstrate the feasibility of this approach.},
    }

  • G. L. Gimel’farb, J. Schmidt, and A. Braunmandl, “Gibbs Fields with Multiple Pairwise Interactions as a Tool for Modeling Grid-Based Data,” in Workshop Process Modelling and Landform Evolution, Bonn, Germany, 1997. doi:10.1007/BFb0009719
    [BibTeX]

    If spatial homogeneity is restricted to only a translation invariance then Gibbs random fields provide effective means for probabilistic modeling of homogeneous or piecewise-homogeneous scalar data on finite rectangular 2D grids. We discuss basic features of novel models with multiple pairwise interactions between the signals in grid sites. These models show good results in simulating and segmenting piecewise–homogeneous image textures. They differ from more widely known ones, such as the autobinomial or Gauss-Markov models, in that both the interaction structure and strengths are learnt from a given training sample. A new learning approach, based on conditional maximum likelihood estimates of the model parameters, provided that the training sample may rank a feasible top place in a parent population, is proposed. We applied the model to reproduce geomorphometric patterns derived from a terrain classification method. The study is aimed at showing that texture analysis, based on a quantification of neighborhood relationships, can be used to descriminate landform types. The texture segmentation shows good correlations with the terrain classification used for learning the Gibbs model parameters. The approach could be valuable in quantifying geomorphometric structures and manual terrain classification schemes.

    @inproceedings{gimelfarb1997gibbs,
    title = {{Gibbs} Fields with Multiple Pairwise Interactions as a Tool for Modeling Grid-Based Data},
    author = {Gimel'farb, Georgy L. and Schmidt, Jochen and Braunmandl, Andre},
    booktitle = {Workshop Process Modelling and Landform Evolution},
    year = {1997},
    address = {Bonn, Germany},
    abstract = {If spatial homogeneity is restricted to only a translation invariance then Gibbs random fields provide effective means for probabilistic modeling of homogeneous or piecewise-homogeneous scalar data on finite rectangular 2D grids. We discuss basic features of novel models with multiple pairwise interactions between the signals in grid sites. These models show good results in simulating and segmenting piecewise--homogeneous image textures. They differ from more widely known ones, such as the autobinomial or Gauss-Markov models, in that both the interaction structure and strengths are learnt from a given training sample. A new learning approach, based on conditional maximum likelihood estimates of the model parameters, provided that the training sample may rank a feasible top place in a parent population, is proposed. We applied the model to reproduce geomorphometric patterns derived from a terrain classification method. The study is aimed at showing that texture analysis, based on a quantification of neighborhood relationships, can be used to descriminate landform types. The texture segmentation shows good correlations with the terrain classification used for learning the Gibbs model parameters. The approach could be valuable in quantifying geomorphometric structures and manual terrain classification schemes.},
    doi = {10.1007/BFb0009719},
    }

  • S. Heuel, “Duale, skalierbare und hierarchische Partionierung und Gruppierung von Binärfiguren,” Diplomarbeit Master Thesis, 1997.
    [BibTeX]
    [none]
    @mastersthesis{heuel1997duale,
    title = {Duale, skalierbare und hierarchische Partionierung und Gruppierung von Bin\"arfiguren},
    author = {Heuel, Stephan},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {1997},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Joachim Buhmann},
    type = {Diplomarbeit},
    city = {Bonn},
    }

  • S. Jans and J. Kindermann, “Objektrekonstruktion durch Bildfolgenanalyse,” Diplomarbeit Master Thesis, 1997.
    [BibTeX]
    [none]
    @mastersthesis{jans1997objektrekonstruktion,
    title = {Objektrekonstruktion durch Bildfolgenanalyse},
    author = {Jans, S\"onke and Kindermann, J\"urgen},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {1997},
    note = {Betreuung: Prof. Dr. Armin B. Cremers, Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Diplomarbeit},
    city = {Bonn},
    }

  • T. Läbe, “Automatic Exterior Orientation in Practice,” GIM International, Geomatics Info Magazine, vol. 11, p. 63–67, 1997.
    [BibTeX]

    A bottleneck of today’s automation of image orientation is the identification of control points for the exterior orientation. A solution for this problem is presented. It is based on 3D-wireframe models of buildings as ground control points. The article describes the setup of a database of such control points and the use of the data for an automatic exterior orientation.

    @article{laebe1997automatic,
    title = {Automatic Exterior Orientation in Practice},
    author = {L\"abe, Thomas},
    journal = {GIM International, Geomatics Info Magazine},
    year = {1997},
    volume = {11},
    pages = {63--67},
    abstract = {A bottleneck of today's automation of image orientation is the identification of control points for the exterior orientation. A solution for this problem is presented. It is based on 3D-wireframe models of buildings as ground control points. The article describes the setup of a database of such control points and the use of the data for an automatic exterior orientation.},
    }

  • H. Müller, “Designing an object-oriented matching tool,” in IAPRS: 3D Reconstruction and Modeling of Topographic Objects, Stuttgart, 1997.
    [BibTeX] [PDF]

    A semiautomatic building extraction system has been extended by an automatic matching tool. It is used for an automatic measurement of building-heights and a semiautomatic determination of ground-heights. The object-oriented design of this matching tool gives the motivation for a design pattern of a general matching tool. This design pattern describes the object-oriented design of implementing several matching techniques within one system. It is able to be applied to several kinds of topographic objects, if a matching technique is known. We show as an example the implementation of a point matching tool, which uses the matching techniques Intensity Correlation and Gradient Correlation as a refinement of Feature Vector Matching.

    @inproceedings{muller1997designing,
    title = {Designing an object-oriented matching tool},
    author = {M\"uller, Hardo},
    booktitle = {{IAPRS}: {3D} Reconstruction and Modeling of Topographic Objects},
    year = {1997},
    address = {Stuttgart},
    abstract = {A semiautomatic building extraction system has been extended by an automatic matching tool. It is used for an automatic measurement of building-heights and a semiautomatic determination of ground-heights. The object-oriented design of this matching tool gives the motivation for a design pattern of a general matching tool. This design pattern describes the object-oriented design of implementing several matching techniques within one system. It is able to be applied to several kinds of topographic objects, if a matching technique is known. We show as an example the implementation of a point matching tool, which uses the matching techniques Intensity Correlation and Gradient Correlation as a refinement of Feature Vector Matching.},
    city = {Bonn},
    proceeding = {IAPRS: 3D Reconstruction and Modeling of Topographic Objects},
    url = {https://www.ipb.uni-bonn.de/pdfs/Muller1997Designing.pdf},
    }

  • L. Teleki, “Dreidimensionale Qualitative Gebäuderekonstruktion,” PhD Thesis, 1997.
    [BibTeX]

    The need for three-dimensional building data for city models increased during the last years. In many case the building data are not complete or they are missing. Due to this reason the capture and update of this data is required. As the tasks are expensive, automation will help to reduce costs. This dissertation discusses the automatic reconstruction of the topology of buildings from qualitatively described three-dimensional structures. Starting point are corners of buildings, extracted on a stereo based analysis. The corners as well as their mutual relations are classified qualitatively. We present an algorithm that connects the corners to buildings. The result can then be used for a quantitative reconstruction. The exclusive use of qualitative description of three-dimensional structures for the reconstruction of the topology is a new approach. The majority of known algorithms are using numerical and statistical methods. The advantage of the qualitative framework lies in a reduction of the number of solutions in contrast numerical methods. Topological reconstruction uses geometrical informations and relations between the corners. Such are i.e. the collinearity of corner edges, the coplanarity of planes or orientation information of edges of the form the edge directs up-left. The framework contains steps for the connection of the corner edges, the reconstruction of the building planes and the consistency verification of intermediate results. A partial reconstruction of the building is done if corners are missing. The methodology transfers known methods from mathematics especially analytic geometry to a new qualitative framework. The framework and the qualitative descriptions show that qualitative reasoning are in principle suitable for building extraction.

    @phdthesis{teleki1997dreidimensionale,
      author   = {Teleki, Laszlo},
      title    = {Dreidimensionale Qualitative Geb\"auderekonstruktion},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1997},
      abstract = {The need for three-dimensional building data for city models increased during the last years. In many case the building data are not complete or they are missing. Due to this reason the capture and update of this data is required. As the tasks are expensive, automation will help to reduce costs. This dissertation discusses the automatic reconstruction of the topology of buildings from qualitatively described three-dimensional structures. Starting point are corners of buildings, extracted on a stereo based analysis. The corners as well as their mutual relations are classified qualitatively. We present an algorithm that connects the corners to buildings. The result can then be used for a quantitative reconstruction. The exclusive use of qualitative description of three-dimensional structures for the reconstruction of the topology is a new approach. The majority of known algorithms are using numerical and statistical methods. The advantage of the qualitative framework lies in a reduction of the number of solutions in contrast numerical methods. Topological reconstruction uses geometrical informations and relations between the corners. Such are i.e. the collinearity of corner edges, the coplanarity of planes or orientation information of edges of the form the edge directs up-left. The framework contains steps for the connection of the corner edges, the reconstruction of the building planes and the consistency verification of intermediate results. A partial reconstruction of the building is done if corners are missing. The methodology transfers known methods from mathematics especially analytic geometry to a new qualitative framework. The framework and the qualitative descriptions show that qualitative reasoning are in principle suitable for building extraction.},
    }

  • U. Weidner, “Digital Surface Models for Building Extraction,” in Automatic Extraction of Man-Made Objects from Aerial and Space Images (II), 1997. doi:10.1007/978-3-0348-8906-3_19
    [BibTeX] [PDF]

    This paper describes an approach to building extraction using Digital Surface Models (DSM) as input data. The approach consists of building detection and reconstruction using parametric and prismatic building models. The main focus is on the extraction of roof structures, an extension of the previously published work, as first step towards the extraction of polyhedral building descriptions in order to also allow the extraction of complex buildings.

    @inproceedings{weidner1997digital,
      author     = {Weidner, Uwe},
      title      = {Digital Surface Models for Building Extraction},
      booktitle  = {Automatic Extraction of Man-Made Objects from Aerial and Space Images (II)},
      editor     = {Gruen, A.},
      year       = {1997},
      doi        = {10.1007/978-3-0348-8906-3_19},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Weidner1997Digital.pdf},
      city       = {Bonn},
      proceeding = {Automatic Extraction of Man-Made Objects from Aerial and Space Images (II)},
      abstract   = {This paper describes an approach to building extraction using Digital Surface Models (DSM) as input data. The approach consists of building detection and reconstruction using parametric and prismatic building models. The main focus is on the extraction of roof structures, an extension of the previously published work, as first step towards the extraction of polyhedral building descriptions in order to also allow the extraction of complex buildings.},
    }

  • U. Weidner, “Roof Extraction from Digital Surface Models,” Institut für Photogrammetrie Bonn 1997.
    [BibTeX]

    In this report we deal with roof extraction from Digital Surface Models (DSM) as a first step to integrate polyhedral building models into our approach to building extraction from DSM (Weidner and Förstner 1995, Weidner 1997a). Results are also presented in Weidner 1997b.

    @techreport{weidner1997roof,
      author      = {Weidner, Uwe},
      title       = {Roof Extraction from Digital Surface Models},
      institution = {Institut f\"ur Photogrammetrie Bonn},
      year        = {1997},
      city        = {Bonn},
      abstract    = {In this report we deal with roof extraction from Digital Surface Models (DSM) as a first step to integrate polyhedral building models into our approach to building extraction from DSM (Weidner and F\"orstner 1995, Weidner 1997a). Results are also presented in Weidner 1997b.},
    }

  • U. Weidner and A. Brunn, “Discriminating Building and Vegetation Areas within Digital Surface Models,” Institute of Photogrammetry, University of Bonn 1997.
    [BibTeX]
    [none]
    @techreport{weidner1997discriminating,
    title = {Discriminating Building and Vegetation Areas within Digital Surface Models},
    author = {Weidner, Uwe and Brunn, Ansgar},
    institution = {Institute of Photogrammetry, University of Bonn},
    year = {1997},
    city = {Bonn},
    }

1996

  • A. Brunn, F. Lang, and W. Förstner, “A Procedure for Segmenting Surfaces by Symbolic and Iconic Image Fusion,” in Mustererkennung 96, Proceeding of the DAGM 96, Heidelberg, Germany, 1996, p. 11–20. doi:10.1007/978-3-642-80294-2_2
    [BibTeX] [PDF]

    This paper deals with the derivation of a symbolic surface description exploiting the information of multiple images while using a minimum of domain knowledge. We present a new concept for segmenting surfaces by fusing multiple images both on the iconic and on the symbolic description level. In a first step a local 3D-reconstruction and interpretation is derived based on the result of a polymorphic feature extraction. It serves as prior information for a second step which refines the initial segmentation using the radiometric image content. Examples of the proposed procedure are presented for the application of 3D-building reconstruction from aerial images.

    @inproceedings{brunn1996procedure,
      author     = {Brunn, Ansgar and Lang, Felicitas and F\"orstner, Wolfgang},
      title      = {A Procedure for Segmenting Surfaces by Symbolic and Iconic Image Fusion},
      booktitle  = {Mustererkennung 96, Proceeding of the DAGM 96},
      year       = {1996},
      address    = {Heidelberg, Germany},
      pages      = {11--20},
      doi        = {10.1007/978-3-642-80294-2_2},
      abstract   = {This paper deals with the derivation of a symbolic surface description exploiting the information of multiple images while using a minimum of domain knowledge. We present a new concept for segmenting surfaces by fusing multiple images both on the iconic and on the symbolic description level. In a first step a local 3D-reconstruction and interpretation is derived based on the result of a polymorphic feature extraction. It serves as prior information for a second step which refines the initial segmentation using the radiometric image content. Examples of the proposed procedure are presented for the application of 3D-building reconstruction from aerial images.},
      city       = {Bonn},
      proceeding = {Mustererkennung 96, Proceeding of the DAGM 96},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Brunn1996Procedure.pdf},
    }

  • A. Brunn, F. Lang, E. Gülch, and W. Förstner, “A Multi-Layer Strategy for 3D Building Acquisition,” in Proceeding of IAPR-TC7 Workshop, Graz, 1996.
    [BibTeX] [PDF]

    In various projects we investigate on the extraction of buildings on different type and representation of data. This paper presents a strategy for 3D building acquisition which combines different approaches based on different levels of description. The approach consists of detection of regions of interest and automatic and semiautomatic reconstruction of object parts and complete buildings. We incorporate the approach in a global concept of interaction between scene and sensors for image interpretation.

    @InProceedings{brunn1996multi,
    title = {A Multi-Layer Strategy for 3D Building Acquisition},
    author = {Brunn, Ansgar and Lang, Felicitas and G\"ulch, Eberhard and F\"orstner, Wolfgang},
    booktitle = {Proceeding of IAPR-TC7 Workshop},
    year = {1996},
    address = {Graz},
    abstract = {In various projects we investigate on the extraction of buildings on different type and representation of data. This paper presents a strategy for 3D building acquisition which combines different approaches based on different levels of description. The approach consists of detection of regions of interest and automatic and semiautomatic reconstruction of object parts and complete buildings. We incorporate the approach in a global concept of interaction between scene and sensors for image interpretation.},
    city = {Bonn},
    proceeding = {Proceeding of IAPR-TC7 Workshop},
    url = {https://www.ipb.uni-bonn.de/pdfs/Brunn1996Multi.pdf},
    }

  • H. Dickel, “Untersuchung der robusten Schätzung der relativen Orientierungselemente,” Diplomarbeit Master Thesis, 1996.
    [BibTeX]
    [none]
    @mastersthesis{dickel1996untersuchung,
      author   = {Dickel, Hartmut},
      title    = {Untersuchung der robusten Sch\"atzung der relativen Orientierungselemente},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1996},
      type     = {Diplomarbeit},
      note     = {Betreuung: Dr.-Ing. Karl-Heiko Ellenbeck, Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • W. Förstner, “10 Pros and Cons Against Performance Characterization of Vision Algorithms,” in Workshop on “Performance Characteristics of Vision Algorithms”, Cambridge, 1996.
    [BibTeX] [PDF]

    The paper discusses objections against performance characterization of vision algorithms and explains their motivation. Short and long term arguments are given which overcome these objections. The methodology for performance characterization is sketched to demonstrate the feasibility of empirical testing of vision algorithms.

    @InProceedings{forstner199610,
    title = {10 Pros and Cons Against Performance Characterization of Vision Algorithms},
    author = {F\"orstner, Wolfgang},
    booktitle = {Workshop on ``Performance Characteristics of Vision Algorithms''},
    year = {1996},
    address = {Cambridge},
    abstract = {The paper discusses objections against performance characterization of vision algorithms and explains their motivation. Short and long term arguments are given which overcome these objections. The methodology for performance characterization is sketched to demonstrate the feasibility of empirical testing of vision algorithms.},
    city = {Bonn},
    proceeding = {Workshop on ``Performance Characteristics of Vision Algorithms''},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner199610.pdf},
    }

  • W. Förstner, “Automatische 3D-Objekterfassung und -erkennung,” in gekürzte Fassung des Vortrags aus der Ringvorlesung ‘Bildverarbeitung und Mustererkennung’ an der Universität Bonn im WS 1995/96, Bonn, Germany, 1996.
    [BibTeX] [PDF]

    Der Beitrag stellt photogrammetrische Systeme zur automatischen Vermessung von Oberflächen vor, die seit einigen Jahren im praktischen Einsatz sind, und präsentiert aktuelle Forschungs-und Entwicklungsarbeiten zur Gebäudeextraktion.

    @inproceedings{forstner1996automatische,
      author     = {F\"orstner, Wolfgang},
      title      = {Automatische 3D-Objekterfassung und -erkennung},
      booktitle  = {gek\"urzte Fassung des Vortrags aus der Ringvorlesung 'Bildverarbeitung und Mustererkennung' an der Universit\"at Bonn im WS 1995/96},
      year       = {1996},
      address    = {Bonn, Germany},
      abstract   = {Der Beitrag stellt photogrammetrische Systeme zur automatischen Vermessung von Oberfl\"achen vor, die seit einigen Jahren im praktischen Einsatz sind, und pr\"asentiert aktuelle Forschungs-und Entwicklungsarbeiten zur Geb\"audeextraktion.},
      city       = {Bonn},
      proceeding = {gek\"urzte Fassung des Vortrags auf der Ringvorlesung Bildverarbeitung und Mustererkennung an der Universit\"at Bonn im WS 1995/96},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1996Automatische.pdf},
    }

  • A. Fischer, “Zur analytischen Herleitung von Aspektgraphen,” Diplomarbeit Master Thesis, 1996.
    [BibTeX]
    [none]
    @MastersThesis{fischer1996zur,
    title = {Zur analytischen Herleitung von Aspektgraphen},
    author = {Fischer, Andr\'e},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {1996},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • E. Gülch, “Extraction of 3D objects from aerial photographs,” in Proc. COST UCE Action 4 Workshop “Information Systems and Processes for urban civil engineering”, Rome, Italy, 1996.
    [BibTeX] [PDF]

    There is an increasing request for 3D data on city objects of all kinds for urban design, confirmed by a recent European wide study on 3D city models. To acquire 3D information in urban areas still is costly, only automated or at least semi-automatic methods appear feasible in the long run to reach the cost-effectiveness, necessary for a broad application. This paper presents a semi-automatic system for 3D building acquisition from various sensor data, mainly, however, from stereo pairs of digitized aerial images. The operator is supported by various automated modules. Very complex buildings can be modeled by a combination of volumetric primitives. The system does not require stereo-viewing and is such suitable also for non-photogrammetrists. The output of the process are 3D volumetric primitives, ready for further analysis in CAD systems or for visualization purposes in combination with automatically extracted texture. We present results from the acquisition of 3D building information in a suburban area and the centers of two cities and give more details on the acquisition times and the quality of the derived data.

    @InProceedings{gulch1996extraction,
    title = {Extraction of 3D objects from aerial photographs},
    author = {G\"ulch, Eberhard},
    booktitle = {Proc. COST UCE Action 4 Workshop ``Information Systems and Processes for urban civil engineering''},
    year = {1996},
    address = {Rome, Italy},
    abstract = {There is an increasing request for 3D data on city objects of all kinds for urban design, confirmed by a recent European wide study on 3D city models. To acquire 3D information in urban areas still is costly, only automated or at least semi-automatic methods appear feasible in the long run to reach the cost-effectiveness, necessary for a broad application. This paper presents a semi-automatic system for 3D building acquisition from various sensor data, mainly, however, from stereo pairs of digitized aerial images. The operator is supported by various automated modules. Very complex buildings can be modeled by a combination of volumetric primitives. The system does not require stereo-viewing and is such suitable also for non-photogrammetrists. The output of the process are 3D volumetric primitives, ready for further analysis in CAD systems or for visualization purposes in combination with automatically extracted texture. We present results from the acquisition of 3D building information in a suburban area and the centers of two cities and give more details on the acquisition times and the quality of the derived data.},
    city = {Bonn},
    proceeding = {Proc. COST UCE Action 4 Workshop ``Information Systems and Processes for urban civil engineering''},
    url = {https://www.ipb.uni-bonn.de/pdfs/Gulch1996Extraction.pdf},
    }

  • E. Gülch, “One-Eye Stereo System for the Acquisition of Complex 3D Building Descriptions,” in GIS, 1996.
    [BibTeX] [PDF]

    \textbf{Summary} An easy usable system for the semi-automatic acquisition of detailed 3D building descriptions from a multitude of images is provided. This approach tackles robustly and efficiently most of the problems of 3D building reconstruction, namely occlusions, inverse mapping, and noise. The 3D modeling is based on Constructive Solid Geometry (CSG) and various automated and supporting tools. Our experiences on the acquisition of an extensive scene are evaluated. \textbf{Zusammenfassung} Monokulares Stereo System für die Erfassung komplexer 3D-Gebäudebeschreibungen. Für die semiautomatische Erfassung detaillierter 3D-Gebäudebeschreibungen aus einer Menge von Bildern wird ein einfach handhabbares System vorgestellt. Auf robuste und effiziente Weise werden die meisten Probleme bei der 3D-Gebäuderekonstruktion – Verdeckungen, inverse Abbildung und Rauschen – angegangen. Die 3D-Modellierung basiert auf der Constructive Solid Geometry (CSG) und zahlreichen automatisierten und unterstützenden Werkzeugen. Unsere Erfahrungen mit einer grossflächigen Erfassung werden evaluiert.

    @inproceedings{gulch1996one,
      author     = {G\"ulch, Eberhard},
      title      = {One-Eye Stereo System for the Acquisition of Complex 3D Building Descriptions},
      booktitle  = {GIS},
      year       = {1996},
      abstract   = {\textbf{Summary} An easy usable system for the semi-automatic acquisition of detailed 3D building descriptions from a multitude of images is provided. This approach tackles robustly and efficiently most of the problems of 3D building reconstruction, namely occlusions, inverse mapping, and noise. The 3D modeling is based on Constructive Solid Geometry (CSG) and various automated and supporting tools. Our experiences on the acquisition of an extensive scene are evaluated. \textbf{Zusammenfassung} Monokulares Stereo System f\"ur die Erfassung komplexer 3D-Geb\"audebeschreibungen. F\"ur die semiautomatische Erfassung detaillierter 3D-Geb\"audebeschreibungen aus einer Menge von Bildern wird ein einfach handhabbares System vorgestellt. Auf robuste und effiziente Weise werden die meisten Probleme bei der 3D-Geb\"auderekonstruktion -- Verdeckungen, inverse Abbildung und Rauschen -- angegangen. Die 3D-Modellierung basiert auf der Constructive Solid Geometry (CSG) und zahlreichen automatisierten und unterst\"utzenden Werkzeugen. Unsere Erfahrungen mit einer grossfl\"achigen Erfassung werden evaluiert.},
      city       = {Bonn},
      proceeding = {GIS},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Gulch1996One.pdf},
    }

  • V. Gallrein, “Bildkalibrierung zur Erzeugung von Doppler-Global-Velocimeter Geschwindigkeitsbildern,” Diplomarbeit Master Thesis, 1996.
    [BibTeX]
    [none]
    @mastersthesis{gallrein1996bildkalibrierung,
      author   = {Gallrein, Volkmar},
      title    = {Bildkalibrierung zur Erzeugung von Doppler-Global-Velocimeter Geschwindigkeitsbildern},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Antriebstechnik der DLR und dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {1996},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Armin B. Cremers},
      abstract = {[none]},
      city     = {Bonn},
    }

  • G. L. Gimel’farb, “Texture Modeling by Multiple Pairwise Pixel Interactions,” in IEEE T-PAMI, 1996. doi:10.1109/34.544081
    [BibTeX] [PDF]

    A Markov random field model with a Gibbs probability distribution (GPD) is proposed for describing particular classes of grayscale images which can be called spatially uniform stochastic textures. The model takes into account only multiple short- and long-range pairwise interactions between the gray levels in the pixels. An effective learning scheme is introduced to recover a structure and strength of the interactions using maximal likelihood estimates of the potentials in the GPD as desired parameters. The scheme is based on an analytic initial approximation of the estimates and their subsequent refinement by a stochastic approximation. Experiments in modelling natural textures show the utility of the proposed model.

    @Article{gimelfarb1996texture,
    title = {Texture Modeling by Multiple Pairwise Pixel Interactions},
    author = {Gimel'farb, Georgy L.},
    journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
    year = {1996},
    abstract = {A Markov random field model with a Gibbs probability distribution (GPD) is proposed for describing particular classes of grayscale images which can be called spatially uniform stochastic textures. The model takes into account only multiple short- and long-range pairwise interactions between the gray levels in the pixels. An effective learning scheme is introduced to recover a structure and strength of the interactions using maximal likelihood estimates of the potentials in the GPD as desired parameters. The scheme is based on an analytic initial approximation of the estimates and their subsequent refinement by a stochastic approximation. Experiments in modelling natural textures show the utility of the proposed model.},
    city = {Bonn},
    doi = {10.1109/34.544081},
    url = {https://www.ipb.uni-bonn.de/pdfs/Gimelfarb1996Texture.pdf},
    }

  • D. Jäger, “Segmentierung triangulierter Flächen,” Diplomarbeit Master Thesis, 1996.
    [BibTeX]
    [none]
    @MastersThesis{jager1996segmentierung,
    title = {Segmentierung triangulierter Fl\"achen},
    author = {J\"ager, Dagmar},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
    year = {1996},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • N. Kaufmann, “Zuordnung von Knotenstrukturen für die Gebäuderekonstruktion,” Diplomarbeit Master Thesis, 1996.
    [BibTeX]
    [none]
    @mastersthesis{kaufmann1996zuordnung,
      author   = {Kaufmann, Norbert},
      title    = {Zuordnung von Knotenstrukturen f\"ur die Geb\"auderekonstruktion},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {1996},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • T. Läbe and K. H. Ellenbeck, “3D-Wireframe Models As Ground Control Points For The Automatic Exterior Orientation,” in Proc. of 18th ISPRS Congress, Vienna, 1996.
    [BibTeX] [PDF]

    The bottleneck of today’s automation of image orientation is the identification of control points for exterior orientation. A solution for this problem is presented. It is based on 3D-wireframe models of buildings as ground control points. The paper describes the setup of a database of such control points.

    @inproceedings{labe19963d,
      author     = {L\"abe, Thomas and Ellenbeck, Karl Heiko},
      title      = {3D-Wireframe Models As Ground Control Points For The Automatic Exterior Orientation},
      booktitle  = {Proc. of 18th ISPRS Congress},
      year       = {1996},
      address    = {Vienna},
      abstract   = {The bottleneck of today's automation of image orientation is the identification of control points for exterior orientation. A solution for this problem is presented. It is based on 3D-wireframe models of buildings as ground control points. The paper describes the setup of a database of such control points.},
      city       = {Bonn},
      proceeding = {Proc. of 18th ISPRS Congress},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Labe19963D.pdf},
    }

  • F. Lang and W. Förstner, “3D-City Modeling with a Digital One-Eye Stereo System,” in Proc. ISPRS Congress Comm. IV, Vienna, 1996.
    [BibTeX] [PDF]

    3D-city information is crucial for a number of applications in city planning, environmental control or for telecommunication. We describe a semiautomatic system for acquiring the 3D-shape of buildings as topographic objects. Buildings are either modeled as a freely structured union of basic shape primitives or as prisms with an arbitrary ground plan, covering a large percentage of existing buildings. Interaction takes place in only one image, requiring the operator to specify the approximate structure and shape of the buildings. 3D-reconstruction including both, height determination and form adaptation, is performed automatically using various matching tools. The paper describes the features of the system and reports on its efficiency based on an extensive test.

    @inproceedings{lang19963d,
      author     = {Lang, Felicitas and F\"orstner, Wolfgang},
      title      = {3D-City Modeling with a Digital One-Eye Stereo System},
      booktitle  = {Proc. ISPRS Congress Comm. IV},
      year       = {1996},
      address    = {Vienna},
      abstract   = {3D-city information is crucial for a number of applications in city planning, environmental control or for telecommunication. We describe a semiautomatic system for acquiring the 3D-shape of buildings as topographic objects. Buildings are either modeled as a freely structured union of basic shape primitives or as prisms with an arbitrary ground plan, covering a large percentage of existing buildings. Interaction takes place in only one image, requiring the operator to specify the approximate structure and shape of the buildings. 3D-reconstruction including both, height determination and form adaptation, is performed automatically using various matching tools. The paper describes the features of the system and reports on its efficiency based on an extensive test.},
      city       = {Bonn},
      proceeding = {Proc. ISPRS Congress Comm. IV},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Lang19963D.pdf},
    }

  • F. Lang and W. Förstner, “Surface Reconstruction of Man-Made Objects using Polymorphic Mid-Level Features and Generic Scene Knowledge,” Zeitschrift für Photogrammetrie und Fernerkundung, vol. 6, p. 193–202, 1996.
    [BibTeX] [PDF]

    This paper presents a new concept for 3D-surface reconstruction, which infers domain specific local 3D-structures in space from its observed local 2D-structures in multiple images using polymorphic relational image descriptions. A 3D-aggregation can combine these local 3D-structures and thus results in a 3D-boundary representation of man-made objects being useful for different analyses and simulations.

    @article{lang1996surface,
      author   = {Lang, Felicitas and F\"orstner, Wolfgang},
      title    = {Surface Reconstruction of Man-Made Objects using Polymorphic Mid-Level Features and Generic Scene Knowledge},
      journal  = {Zeitschrift f\"ur Photogrammetrie und Fernerkundung},
      year     = {1996},
      volume   = {6},
      pages    = {193--202},
      abstract = {This paper presents a new concept for 3D-surface reconstruction, which infers domain specific local 3D-structures in space from its observed local 2D-structures in multiple images using polymorphic relational image descriptions. A 3D-aggregation can combine these local 3D-structures and thus results in a 3D-boundary representation of man-made objects being useful for different analyses and simulations.},
      city     = {Bonn},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Lang1996Surface.pdf},
    }

  • L. Teleki, “Constraint Logic Programing – a Framework for Qualitative Reasoning,” in Qualitative Reasoning, The Tenth International Workshop, Lake Tahoe, 1996.
    [BibTeX] [PDF]

    We propose to use Constraint Logic Programming (CLP) for the specification and implementation of Qualitative Reasoning (QR) problems that are specialized Constraint Satisfaction Problems. The use of CLP has two advantages: (i) CLP gives a well defined and understood logical framework for the problem specification, and (ii) CLP is not only a logical framework, it is also a family of languages specially developed for solving classes of CSP problems. Thus we obtain a class of powerful implementation languages for rapid prototyping. To illustrate the steps of specification and implementation we describe in detail the core of the QSIM algorithm \cite{kuipers94:qualitative}, namely the filtering of the state transitions in the CLP framework. We show how the basic constraints are specified in this framework and describe the technical aspects of an implementation. We want to demonstrate the advantages of CLP through an example for a large and complex qualitative reasoning algorithm.

    @inproceedings{teleki1996constraint,
      author     = {Teleki, L\'aszl\'o},
      title      = {Constraint Logic Programing - a Framework for Qualitative Reasoning},
      booktitle  = {Qualitative Reasoning, The Tenth International Workshop},
      editor     = {Iwasaki, Yumi and Farquhar, Adam},
      year       = {1996},
      address    = {Lake Tahoe},
      abstract   = {We propose to use Constraint Logic Programming (CLP) for the specification and implementation of Qualitative Reasoning (QR) problems that are specialized Constraint Satisfaction Problems. The use of CLP has two advantages: (i) CLP gives a well defined and understood logical framework for the problem specification, and (ii) CLP is not only a logical framework, it is also a family of languages specially developed for solving classes of CSP problems. Thus we obtain a class of powerful implementation languages for rapid prototyping. To illustrate the steps of specification and implementation we describe in detail the core of the QSIM algorithm \cite{kuipers94:qualitative}, namely the filtering of the state transitions in the CLP framework. We show how the basic constraints are specified in this framework and describe the technical aspects of an implementation. We want to demonstrate the advantages of CLP through an example for a large and complex qualitative reasoning algorithm.},
      city       = {Bonn},
      proceeding = {Qualitative Reasoning, The Tenth International Workshop},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Teleki1996Constraint.pdf},
    }

  • L. Teleki, “Embedding Qualitative Reasoning into Constraint Logic Programming,” in Proc. of the Workshop on Constraint Programming Applications, in conjunction with the Second International Conf. on Principles and Practice of Constraint Programming (CP96), Cambridge, Massachusetts, USA, 1996.
    [BibTeX] [PDF]

    We propose to use Constraint Logic Programming (CLP) for the specification and implementation of Qualitative Reasoning (QR) problems that are specialized Constraint Satisfaction Problems. Although it has long been recognized that many frameworks, like the QSIM algorithm \cite{kuipers94:qualitative} can be viewed as a set of constraint satisfaction problems (CSP), the area of QR is relatively unknown in the CLP literature. In this paper we would like to present, through the language of QSIM, one set of questions and problems analyzed in the area of QR and to present the CLP specification of the key algorithm of QSIM, the c-filter. We show how the basic constraints of QSIM are specified and describe the technical aspects of our implementation.

    @InProceedings{teleki1996embedding,
    title = {Embedding Qualitative Reasoning into Constraint Logic Programming},
    author = {Teleki, L\'aszl\'o},
    booktitle = {Proc. of the Workshop on Constraint Programming Applications, in conjunction with the Second International Conf. on Principles and Practice of Constraint Programming (CP96)},
    year = {1996},
    address = {Cambridge, Massachusetts, USA},
    abstract = {We propose to use Constraint Logic Programming (CLP) for the specification and implementation of Qualitative Reasoning (QR) problems that are specialized Constraint Satisfaction Problems. Although it has long been recognized that many frameworks, like the QSIM algorithm \cite{kuipers94:qualitative} can be viewed as a set of constraint satisfaction problems (CSP), the area of QR is relatively unknown in the CLP literature. In this paper we would like to present, through the language of QSIM, one set of questions and problems analyzed in the area of QR and to present the CLP specification of the key algorithm of QSIM, the c-filter. We show how the basic constraints of QSIM are specified and describe the technical aspects of our implementation.},
    city = {Bonn},
    proceeding = {Proc. of the Workshop on Constraint Programming Applications, in conjunction with the Second International Conf. on Principles and Practice of Constraint Programming (CP96)},
    url = {https://www.ipb.uni-bonn.de/pdfs/Teleki1996Embedding.pdf},
    }

  • U. Weidner, “An Approach to Building Extraction from Digital Surface Models,” in Proc. of 18th ISPRS Congress, Vienna, 1996.
    [BibTeX] [PDF]

    Motivated by the test data sets of ISPRS WG III/3 on image understanding we investigate the feasibility of building extraction using high-resolution Digital Surface Models (DSM) as input data, which do not only contain information about the topographic surface like Digital Elevation Models (DEM), but also information about the buildings. The steps of the proposed procedure increasingly use explicit domain knowledge, specifically geometric constraints in the form of parametric and prismatic building models. The reconstruction of the prismatic models and the selection of the models are based on the principle of Minimum Description Length (MDL). In Weidner/Förstner (1995): Towards Automatic Building Reconstruction from High Resolution Digital Elevation Models we already described the general strategy of our approach, including building detection and building reconstruction. The main extensions of this contribution consists of the automatic selection of the model for the reconstruction of buildings. This selection is based on the principle of Minimum Description Length (MDL), as well as the reconstruction of prismatic building models. In addition, we also discuss the possible use of information from GIS or maps in our approach.

    @inproceedings{weidner1996approach,
      author     = {Weidner, Uwe},
      title      = {An Approach to Building Extraction from Digital Surface Models},
      booktitle  = {Proc. of 18th ISPRS Congress},
      year       = {1996},
      address    = {Vienna},
      abstract   = {Motivated by the test data sets of ISPRS WG III/3 on image understanding we investigate the feasibility of building extraction using high-resolution Digital Surface Models (DSM) as input data, which do not only contain information about the topographic surface like Digital Elevation Models (DEM), but also information about the buildings. The steps of the proposed procedure increasingly use explicit domain knowledge, specifically geometric constraints in the form of parametric and prismatic building models. The reconstruction of the prismatic models and the selection of the models are based on the principle of Minimum Description Length (MDL). In Weidner/F\"orstner (1995): Towards Automatic Building Reconstruction from High Resolution Digital Elevation Models we already described the general strategy of our approach, including building detection and building reconstruction. The main extensions of this contribution consists of the automatic selection of the model for the reconstruction of buildings. This selection is based on the principle of Minimum Description Length (MDL), as well as the reconstruction of prismatic building models. In addition, we also discuss the possible use of information from GIS or maps in our approach.},
      city       = {Bonn},
      proceeding = {Proc. of 18th ISPRS Congress},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Weidner1996Approach.pdf},
    }

  • U. Weidner, “MDL-basierte Formrekonstruktion zur Gebäudeextraktion,” in Wissenschaftliche Jahrestagung der DGPF, 1996.
    [BibTeX] [PDF]

    Dieses Papier ist die Zusammenfassung eines Vortrages während der wissenschaftlichen Jahrestagung der DGPF 1995 und beschreibt einen MDL-basierten Ansatz zur Rekonstruktion von Polygonen unter Nutzung lokaler und globaler Restriktionen.

    @inproceedings{weidner1996mdl,
      author     = {Weidner, Uwe},
      title      = {MDL-basierte Formrekonstruktion zur Geb\"audeextraktion},
      booktitle  = {Wissenschaftliche Jahrestagung der DGPF},
      year       = {1996},
      abstract   = {Dieses Papier ist die Zusammenfassung eines Vortrages w\"ahrend der wissenschaftlichen Jahrestagung der DGPF 1995 und beschreibt einen MDL-basierten Ansatz zur Rekonstruktion von Polygonen unter Nutzung lokaler und globaler Restriktionen.},
      city       = {Bonn},
      proceeding = {Wissenschaftliche Jahrestagung der DGPF 1995},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Weidner1996MDL.pdf},
    }

  • S. Winter, “Beobachtungsunsicherheit und topologische Relationen,” in Workshop Datenqualität und Metainformation in Geo-Informationssystemen, Rostock, 1996, p. 141–154.
    [BibTeX] [PDF]

    In diesem Beitrag wird eine Methode zur Bestimmung der topologischen Relationen zwischen zwei in ihrer Lage unsicheren Regionen vorgestellt. Die Bestimmung basiert auf den Extremwerten einer Abstandsfunktion entlang eines Skeletts durch unsichere Schnittmengen zwischen den Regionen. Mit einer solchen Repräsentation vertiefen wir die Relationskonzepte der 9-Intersektion, indem wir über topologisch invariante Merkmale hinaus auch metrische Information erhalten, um sie zur Unsicherheit in Bezug zu setzen. Die Beobachtung der Abstände wird hier statistisch modelliert. Mit einer Bayes-Klassifikation erhalten wir Abstandsklassen, aus denen wir auf die topologische Relation zurückschließen und deren Wahrscheinlichkeit wir so angeben können.

    @inproceedings{winter1996beobachtungsunsicherheit,
      author    = {Winter, Stephan},
      title     = {Beobachtungsunsicherheit und topologische Relationen},
      booktitle = {Workshop Datenqualit\"at und Metainformation in Geo-Informationssystemen},
      year      = {1996},
      address   = {Rostock},
      pages     = {141--154},
      abstract  = {In diesem Beitrag wird eine Methode zur Bestimmung der topologischen Relationen zwischen zwei in ihrer Lage unsicheren Regionen vorgestellt. Die Bestimmung basiert auf den Extremwerten einer Abstandsfunktion entlang eines Skeletts durch unsichere Schnittmengen zwischen den Regionen. Mit einer solchen Repr\"asentation vertiefen wir die Relationskonzepte der 9-Intersektion, indem wir \"uber topologisch invariante Merkmale hinaus auch metrische Information erhalten, um sie zur Unsicherheit in Bezug zu setzen. Die Beobachtung der Abst\"ande wird hier statistisch modelliert. Mit einer Bayes-Klassifikation erhalten wir Abstandsklassen, aus denen wir auf die topologische Relation zur\"uckschlie{\ss}en und deren Wahrscheinlichkeit wir so angeben k\"onnen.},
      city      = {Bonn},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Winter1996Beobachtungsunsicherheit.pdf},
    }

  • S. Winter, “Distances for Uncertain Topological Relations,” in ESF-NSF Summer Institute in Geographic Information, Berlin, Germany, 1996.
    [BibTeX] [PDF]

    Considering uncertainty of spatial data in any GIS analysis is a theme of actual research, but far from practicability. One prerequisite is the availability of descriptions concerning uncertainty (meta-data), and another is the attachment of uncertainty to spatial analysis. In our contribution we propose a method to determine the topological relation between two positional uncertain regions. The decision is based on morphological distances along a skeleton through uncertain intersection sets. These measures are equivalent to the known representation by intersection sets, but yield additionally metric information. These distances are applied to a statistical model of the observation process. A Bayesian classification yields distance classes which allow to deduce the topological relationship. We discuss also importance and perspectives of the method.

    @inproceedings{winter1996distances,
      author     = {Winter, Stephan},
      title      = {Distances for Uncertain Topological Relations},
      booktitle  = {ESF-NSF Summer Institute in Geographic Information},
      year       = {1996},
      address    = {Berlin, Germany},
      abstract   = {Considering uncertainty of spatial data in any GIS analysis is a theme of actual research, but far from practicability. One prerequisite is the availability of descriptions concerning uncertainty (meta-data), and another is the attachment of uncertainty to spatial analysis. In our contribution we propose a method to determine the topological relation between two positional uncertain regions. The decision is based on morphological distances along a skeleton through uncertain intersection sets. These measures are equivalent to the known representation by intersection sets, but yield additionally metric information. These distances are applied to a statistical model of the observation process. A Bayesian classification yields distance classes which allow to deduce the topological relationship. We discuss also importance and perspectives of the method.},
      city       = {Bonn},
      proceeding = {ESF-NSF Summer Institute in Geographic Information},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Winter1996Distances.pdf},
    }

  • S. Winter and A. Car, “Report from ESF/NCGIA Summer Institute in GIS, Berlin,” in Summer Institute in Geographic Information, 1996.
    [BibTeX] [PDF]
    [none]
    @inproceedings{winter1996report,
      author     = {Winter, Stephan and Car, Adrijana},
      title      = {Report from ESF/NCGIA Summer Institute in GIS, Berlin},
      booktitle  = {Summer Institute in Geographic Information},
      year       = {1996},
      abstract   = {[none]},
      city       = {Bonn},
      proceeding = {1996},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Winter1996Report.pdf},
    }

1995

  • R. Becker, “Untersuchung von ID3 zur Klassifikation von Bildmerkmalen,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{becker1995untersuchung,
      author   = {Becker, Ralf},
      title    = {Untersuchung von ID3 zur Klassifikation von Bildmerkmalen},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Felicitas Lang},
      abstract = {[none]},
      city     = {Bonn},
    }

  • U. Brandes, “Rekonstruktion bewegter Wasseroberflächen anhand ihrer lichtbrechenden Wirkung,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{brandes1995rekonstruktion,
      author   = {Brandes, Ulf},
      title    = {Rekonstruktion bewegter Wasseroberfl\"achen anhand ihrer lichtbrechenden Wirkung},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Physikalischen Institut der Universit\"at Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Klaus Heinloth, Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • C. Braun, T. H. Kolbe, F. Lang, W. Schickler, V. Steinhage, A. B. Cremers, W. Förstner, and L. Plümer, “Models for Photogrammetric Building Reconstruction,” Computers & Graphics, vol. 19, 1995, p. 109–118. doi:10.1016/0097-8493(94)00126-J
    [BibTeX] [PDF]

    The paper discusses the modeling necessary for recovering man made objects – in this case buildings – in complex scenes from digital imagery. The approach addresses all levels of image analysis for deriving semantically meaningful descriptions of the scene from the image, via the geometrical/physical model of the objects and their counterparts in the image. The central link between raster image and scene are network-like organized aspects of parts of the objects. This is achieved by generically modelling the objects using parametrized volume primitives together with the application specific constraints, which seems to be adequate for many types of buildings. The paper sketches the various interrelationships between the different models and their use for feature extraction, hypothesis generation and verification.

    @article{braun1995models,
      author     = {Braun, Carola and Kolbe, Thomas H. and Lang, Felicitas and Schickler, Wolfgang and Steinhage, Volker and Cremers, Armin B. and F\"orstner, Wolfgang and Pl\"umer, Lutz},
      title      = {Models for Photogrammetric Building Reconstruction},
      journal    = {Computers \& Graphics},
      year       = {1995},
      volume     = {19},
      pages      = {109--118},
      doi        = {10.1016/0097-8493(94)00126-J},
      abstract   = {The paper discusses the modeling necessary for recovering man made objects - in this case buildings - in complex scenes from digital imagery. The approach addresses all levels of image analysis for deriving semantically meaningful descriptions of the scene from the image, via the geometrical/physical model of the objects and their counterparts in the image. The central link between raster image and scene are network-like organized aspects of parts of the objects. This is achieved by generically modelling the objects using parametrized volume primitives together with the application specific constraints, which seems to be adequate for many types of buildings. The paper sketches the various interrelationships between the different models and their use for feature extraction, hypothesis generation and verification.},
      city       = {Bonn},
      proceeding = {Computers \& Graphics, Vol. 19},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Braun1995Models.pdf},
    }

  • W. Förstner, “Mid-Level Vision Processes for Automatic Building Extraction,” in Automatic Extraction of Man-Made Objects from Aerial and Space Images, 1995, p. 179–188. doi:10.1007/978-3-0348-9242-1_17
    [BibTeX] [PDF]

    Mid-level processes in vision are understood to produce structured descriptions of images without relying on very specific semantic scene knowledge. Automatic building extraction can use geometric models to a large extent. Geometric hypotheses may be inferred from the given data in 2D or 3D and represent elementary constraints as incidence or collinearity or more specific relations as symmetries. The inferred hypothesis may lead to difficulties during spatial inference due to noise and to inconsistent and mutually dependent constraints. The paper discusses the selection of mutually not-contradicting constraints via robust estimation and the selection of a set of independent constraints as a prerequisite for an optimal estimation of the objects shape. Examples from the analysis of image and range data are given.

    @inproceedings{forstner1995mid,
      author     = {F\"orstner, Wolfgang},
      title      = {Mid-Level Vision Processes for Automatic Building Extraction},
      booktitle  = {Automatic Extraction of Man-Made Objects from Aerial and Space Images},
      editor     = {Gruen, A. and Kuebler, O. and Agouris, P.},
      year       = {1995},
      pages      = {179--188},
      doi        = {10.1007/978-3-0348-9242-1_17},
      abstract   = {Mid-level processes in vision are understood to produce structured descriptions of images without relying on very specific semantic scene knowledge. Automatic building extraction can use geometric models to a large extent. Geometric hypotheses may be inferred from the given data in 2D or 3D and represent elementary constraints as incidence or collinearity or more specific relations as symmetries. The inferred hypothesis may lead to difficulties during spatial inference due to noise and to inconsistent and mutually dependent constraints. The paper discusses the selection of mutually not-contradicting constraints via robust estimation and the selection of a set of independent constraints as a prerequisite for an optimal estimation of the objects shape. Examples from the analysis of image and range data are given.},
      city       = {Bonn},
      proceeding = {Automatic Extraction of Man-Made Objects from Aerial and Space Images},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1995Mid.pdf},
    }

  • W. Förstner, “GIS – The Third Dimension,” in Proc. IUSM WG on GIS/LIS Workshop “Current Status and Challenges of Geoinformation Systems”, Hannover, 1995.
    [BibTeX] [PDF]

    The paper discusses the problem areas in establishing 3D-Geoinformation Systems. Among the many applications it restricts to the 3D-modelling of cities. Acquisition methods for 3D-data and examples for the use of such 3D-models are presented. Finally, a 2D-representation for 3D-objects with vertical walls but without passages is proposed which may be used for storing buildings and which may form a link to CAD-systems.

    @inproceedings{forstner1995gis,
      author     = {F\"orstner, Wolfgang},
      title      = {GIS - The Third Dimension},
      booktitle  = {Proc. IUSM WG on GIS/LIS Workshop ``Current Status and Challenges of Geoinformation Systems''},
      year       = {1995},
      address    = {Hannover},
      abstract   = {The paper discusses the problem areas in establishing 3D-Geoinformation Systems. Among the many applications it restricts to the 3D-modelling of cities. Acquisition methods for 3D-data and examples for the use of such 3D-models are presented. Finally, a 2D-representation for 3D-objects with vertical walls but without passages is proposed which may be used for storing buildings and which may form a link to CAD-systems.},
      city       = {Bonn},
      proceeding = {Proc. IUSM WG on GIS/LIS Workshop ``Current Status and Challenges of Geoinformation Systems''},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1995GIS.pdf},
    }

  • W. Förstner, “The Role of Robustness in Computer Vision,” in Proc. Workshop “Milestones in Computer Vision”, Vorau, 1995.
    [BibTeX] [PDF]

    The paper discusses tools of diagnostics and robustness in the context of automating vision. Motivation is the building of so-called traffic light programs which contain a reliable selfdiagnosis enabling to chain vision modules. Special attention is paid to show the prerequisites for using tools for quality evaluation. The paper concludes with open questions.

    @inproceedings{forstner1995role,
      author     = {F\"orstner, Wolfgang},
      title      = {The Role of Robustness in Computer Vision},
      booktitle  = {Proc. Workshop ``Milestones in Computer Vision''},
      year       = {1995},
      address    = {Vorau},
      abstract   = {The paper discusses tools of diagnostics and robustness in the context of automating vision. Motivation is the building of so-called traffic light programs which contain a reliable selfdiagnosis enabling to chain vision modules. Special attention is paid to show the prerequisites for using tools for quality evaluation. The paper concludes with open questions.},
      city       = {Bonn},
      proceeding = {Proc. Workshop ``Milestones in Computer Vision''},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1995Role.pdf},
    }

  • W. Förstner, “A Unified Framework for the Automatic Matching of Points and Lines in Multiple Oriented Images,” in Photogrammetric Week 95, 1995, p. 173–183.
    [BibTeX] [PDF]

    The paper discusses the main aspects of automatic point transfer as basis for determining the orientation of digital imagery. Point selection, matching techniques, the role of approximate values, the object structure and the available constraints are discussed. The strategies of three approaches for point transfer in aerial triangulation are compared.

    @inproceedings{forstner1995unified,
      author    = {F\"orstner, Wolfgang},
      title     = {A Unified Framework for the Automatic Matching of Points and Lines in Multiple Oriented Images},
      booktitle = {Photogrammetric Week 95},
      year      = {1995},
      pages     = {173--183},
      abstract  = {The paper discusses the main aspects of automatic point transfer as basis for determining the orientation of digital imagery. Point selection, matching techniques, the role of approximate values, the object structure and the available constraints are discussed. The strategies of three approaches for point transfer in aerial triangulation are compared.},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner1995Unified.pdf},
    }

  • W. Förstner, “A Personal View,” GIM International, 1995, p. 89.
    [BibTeX]
    @article{foerstner95personal,
      author  = {F{\"o}rstner, Wolfgang},
      title   = {A Personal View},
      journal = {GIM International},
      year    = {1995},
      pages   = {89},
    }

  • W. Förstner and U. Weidner, “Towards Automatic Building Reconstruction from High Resolution Digital Elevation Models,” in ISPRS Journal, 1995, p. 38–49.
    [BibTeX] [PDF]

    The paper deals with an approach for extracting the 3D-shape of buildings from high resolution Digital Elevation Models (DEMs), having a grid resolution between 0.5 and 5m. The steps of the proposed procedure increasingly use explicit domain knowledge, specifically geometric constraints in the form of parametric and prismatic building models. A new MDL-based approach generating a polygonal ground plan from segment boundaries is given. The used knowledge is object related making adaption to data of different density and resolution simple and transparent.

    @article{forstner1995towards,
      author     = {F\"orstner, Wolfgang and Weidner, Uwe},
      title      = {Towards Automatic Building Reconstruction from High Resolution Digital Elevation Models},
      journal    = {ISPRS Journal of Photogrammetry and Remote Sensing},
      year       = {1995},
      pages      = {38--49},
      abstract   = {The paper deals with an approach for extracting the 3D-shape of buildings from high resolution Digital Elevation Models (DEMs), having a grid resolution between 0.5 and 5m. The steps of the proposed procedure increasingly use explicit domain knowledge, specifically geometric constraints in the form of parametric and prismatic building models. A new MDL-based approach generating a polygonal ground plan from segment boundaries is given. The used knowledge is object related making adaption to data of different density and resolution simple and transparent.},
      city       = {Bonn},
      proceeding = {ISPRS Journal},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1995Towards.pdf},
    }

  • W. Förstner, U. Weidner, and A. Brunn, “Model-based 2D-Shape Recovery,” in Mustererkennung, 1995, p. 260–268.
    [BibTeX] [PDF]

    The paper presents a new approach for the reconstruction of polygons using local and global constraints. The MDL-based solution is shown to be useful for analysing range and image data of buildings. Paper at 17th DAGM symposium ’95, Bielefeld, September 13.-15.

    @inproceedings{forstner1995model,
      author     = {F\"orstner, Wolfgang and Weidner, Uwe and Brunn, Ansgar},
      title      = {Model-based 2D-Shape Recovery},
      booktitle  = {Mustererkennung},
      editor     = {Sagerer, G.},
      year       = {1995},
      pages      = {260--268},
      abstract   = {The paper presents a new approach for the reconstruction of polygons using local and global conctraints. The MDL-based solution is shown to be useful for analysing range and image data of buildings. Paper at 17th DAGM symposium '95, Bielefeld, September 13.-15.},
      city       = {Bonn},
      proceeding = {Mustererkennung},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1995Model.pdf},
    }

  • H. Fitz, “Kantenextraktion mittels Dynamischer Programmierung,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{fitz1995kantenextraktion,
      author   = {Fitz, Helmut},
      title    = {Kantenextraktion mittels Dynamischer Programmierung},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Veenker},
      abstract = {[none]},
      city     = {Bonn},
    }

  • C. Fuchs and W. Förstner, “Polymorphic Grouping for Image Segmentation,” in Proc. of the 5th ICCV 1995, Cambridge, U.S.A., 1995. doi:10.1109/ICCV.1995.466789
    [BibTeX] [PDF]

    The paper describes a new approach to image segmentation. It accepts the inherent deficiencies occurring when extracting low-level features and when dealing with the complexity of real scenes. Image segmentation therefore is understood as deriving a rich symbolic description useful for tasks such as stereo or object recognition in outdoor scenes. The approach is based on a polymorphic scheme for simultaneously extracting points, lines and segments in a topologically consistent manner, together with their mutual relations derived from the feature adjacency graph (FAG) thereby performing several grouping steps which gradually use more and more specific domain knowledge to achieve an optimal image description. The heart of the approach is 1.) a detailed analysis of the FAG and 2.) a robust estimation for validating the found geometric hypotheses. The analysis of the FAG, derived from the exoskeleton of the features, allows to detect inconsistencies of the extracted features with the ideal image model, a cell-complex. The FAG is used for finding hypotheses about incidence relations and geometric hypotheses, such as collinearity or parallelity, also between non-neighbored points and lines. The M-type robust estimation is used for simultaneously eliminating wrong hypotheses on geometric relationships. It uses a new argument for the weighting function.

    @inproceedings{fuchs1995polymorphic,
      author     = {Fuchs, Claudia and F\"orstner, Wolfgang},
      title      = {Polymorphic Grouping for Image Segmentation},
      booktitle  = {Proc. of the 5th ICCV 1995},
      year       = {1995},
      address    = {Cambridge, U.S.A.},
      doi        = {10.1109/ICCV.1995.466789},
      abstract   = {The paper describes a new approach to image segmentation. It accepts the inherent deficiencies occuring when extracting low-level features and when dealing with the complexity of real scenes. Image segmentation therefore is understood as deriving a rich symbolic description useful for tasks such as stereo or object recognition in outdoor scenes. The approach is based on a polymorphic scheme for simultaneously extracting points, lines and segments in a topologically consistent manner, together with their mutual relations derived from the feature adjacency graph (FAG) thereby performing several grouping steps which gradually use more and more specific domain knowledge to achieve an optimal image description. The heart of the approach is 1.) a detailed analysis of the FAG and 2.) a robust estimation for validating the found geometric hypotheses. The analysis of the FAG, derived from the exoskeleton of the features, allows to detect inconsistencies of the extracted features with the ideal image model, a cell-complex. The FAG is used for finding hypotheses about incidence relations and geometric hypotheses, such as collinearity or parallelity, also between non-neighbored points and lines. The M-type robust estimation is used for simultaneously eliminating wrong hypotheses on geometric relationships. It uses a new argument for the weighting function.},
      city       = {Cambridge, U.S.A.},
      proceeding = {5th ICCV},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Fuchs1995Polymorphic.pdf},
    }

  • K. Kulschewski, “Verfahren zur robusten Schätzung der relativen Orientierungselemente,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{kulschewski1995verfahren,
      author   = {Kulschewski, Kai},
      title    = {Verfahren zur robusten Sch\"atzung der relativen Orientierungselemente},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Wolfgang Schickler},
      abstract = {[none]},
      city     = {Bonn},
    }

  • F. Lang and W. Förstner, “Matching Techniques,” in Proc.: 2nd Course in Digital Photogrammetry, 1995.
    [BibTeX] [PDF]

    One of the central tasks in Photogrammetry and Computer Vision is the localization and reconstruction of objects in the scene. Localization aims at determining the pose , i. e. the position and the orientation of an object. It assumes the form of the object to be known or at least to be known up to some structural or numerical parameters and the mutual relation between the reference frames, i. e. coordinate systems of the object and the cameras to be determined. Reconstruction , on the other hand aims at determining the form possibly also the structure of the object. The form description need not, but may be related to a e. g. object centred, reference coordinate system. In all cases the central tasks is to match the description of one or several images to the description of the images or objects, i.e. to establish correspondence. In all cases automating localization and reconstruction requires to establish the correspondence or match between several images or between one or several images and a model. We therefore may distinguish several cases: * Image Matching * Object Localization * Object Reconstruction which are discussed in this publication.

    @inproceedings{lang1995matching,
      author     = {Lang, Felicitas and F\"orstner, Wolfgang},
      title      = {Matching Techniques},
      booktitle  = {Proc.: 2nd Course in Digital Photogrammetry},
      year       = {1995},
      abstract   = {One of the central tasks in Photogrammetry and Computer Vision is the localization and reconstruction of objects in the scene. Localization aims at determining the pose , i. e. the position and the orientation of an object. It assumes the form of the object to be known or at least to be known up to some structural or numerical parameters and the mutual relation between the reference frames, i. e. coordinate systems of the object and the cameras to be determined. Reconstruction , on the other hand aims at determining the form possibly also the structure of the object. The form description need not, but may be related to a e. g. object centred, reference coordinate system. In all cases the central tasks is to match the description of one or several images to the description of the images or objects, i.e. to establish correspondence. In all cases automating localization and reconstruction requires to establish the correspondence or match between several images or between one or several images and a model. We therefore may distinguish several cases: * Image Matching * Object Localization * Object Reconstruction which are discussed in this publication.},
      city       = {Bonn},
      proceeding = {Proc.: 2nd Course in Digital Photogrammetry},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Lang1995Matching.pdf},
    }

  • F. Lang, T. Löcherbach, and W. Schickler, “A one-eye Stereo System for Semi-Automatic 3D-Building Extraction,” in Geomatics Info Magazine, 1995.
    [BibTeX] [PDF]

    We present a semi-automatic system for building extraction that has been developed at the Institute for Photogrammetry at the University of Bonn. Digitized images are used as data source. Data capture takes place at a simple workstation. Single point measurement is replaced by the measurement of building models. Automation supports the operator and thus increases the system performance. The representation of building models allows a link to CAD, GIS and computer graphics.

    @article{lang1995one,
      author     = {Lang, Felicitas and L\"ocherbach, Thomas and Schickler, Wolfgang},
      title      = {A one-eye Stereo System for Semi-Automatic 3D-Building Extraction},
      journal    = {Geomatics Info Magazine},
      year       = {1995},
      abstract   = {We present a semi-automatic system for building extraction that has been developed at the Institute for Photogrammetry at the University of Bonn. Digitized images are used as data source. Data capture takes place at a simple workstation. Single point measurement is replaced by the measurement of building models. Automation supports the operator and thus increases the system performance. The representation of building models allows a link to CAD, GIS and computer graphics.},
      city       = {Bonn},
      proceeding = {Geomatics Info Magazine},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Lang1995one.pdf},
    }

  • M. Pospiech, “Extraktion von Gebäudeumrissen aus hochauflösenden Digitalen Höhenmodellen,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{pospiech1995extraktion,
      author   = {Pospiech, Michael},
      title    = {Extraktion von Geb\"audeumrissen aus hochaufl\"osenden Digitalen H\"ohenmodellen},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Uwe Weidner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • U. Scheuß, “Entwicklung eines autonomen Kalibrierungsverfahrens für das Stereokamerasystem eines Roboters,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{scheuss1995entwicklung,
      author   = {Scheu{\ss}, Ulrich},
      title    = {Entwicklung eines autonomen Kalibrierungsverfahrens f\"ur das Stereokamerasystem eines Roboters},
      school   = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit dem Institut f\"ur Informatik der Universit\"at Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Prof. Dr. Joachim Buhmann},
      abstract = {[none]},
      city     = {Bonn},
    }

  • U. Weidner, “Building Extraction from Digital Elevation Models,” Institut für Photogrammetrie, Universität Bonn 1995.
    [BibTeX] [PDF]

    Kurzdokumentation über die Handhabung des Programms bex.

    @techreport{weidner1995building,
      author      = {Weidner, Uwe},
      title       = {Building Extraction from Digital Elevation Models},
      institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
      year        = {1995},
      abstract    = {Kurzdokumentation \"uber die Handhabung des Programms bex.},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Weidner1995Building.pdf},
    }

  • U. Weidner, “Evaluation of Building Extraction from Digital Elevation Models,” Institut für Photogrammetrie, Universität Bonn 1995.
    [BibTeX] [PDF]

    The technical report describes the basics of our approach for building extraction using automatically derived Digital Elevation Models (DEMs), and the results for the ISPRS test data sets.

    @techreport{weidner1995evaluation,
      author      = {Weidner, Uwe},
      title       = {Evaluation of Building Extraction from Digital Elevation Models},
      institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
      year        = {1995},
      abstract    = {The technical report describes the basics of our approach for building extraction using automatically derived Digital Elevation Models (DEMs), and the results for the ISPRS test data sets.},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Weidner1995Evaluation.pdf},
    }

  • U. Weidner, “Interpretation von Digitalen Höhenmodellen,” in Talk at SFB Workshop Das Relief, 1995.
    [BibTeX] [PDF]

    The paper is an abstract of a talk concerning topics related to the interpretation of Digital Elevation Models.

    @inproceedings{weidner1995interpretation,
      author     = {Weidner, Uwe},
      title      = {Interpretation von Digitalen H\"ohenmodellen},
      booktitle  = {Talk at SFB Workshop Das Relief},
      year       = {1995},
      abstract   = {The paper is an abstract of a talk concerning topics related to the interpretation of Digital Elevation Models.},
      city       = {Bonn},
      proceeding = {Talk at SFB Workshop Das Relief},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Weidner1995Interpretation.pdf},
    }

  • U. Weidner, “A Metric for Comparing Symmetric Positive Definite Matrices,” Institut für Photogrammetrie, Universität Bonn 1995.
    [BibTeX]

    The note proposes a metric for comparing symmetric positive definite matrices. Symmetric positive definite matrices can be interpreted as covariance matrices. The metric is shown to be invariant with respect to common affine transformations of the reference coordinate system of the covariance matrices and to inversions of the matrices, thus also measures the distance between the corresponding weight matrices.

    @techreport{weidner1995metric,
      author      = {Weidner, Uwe},
      title       = {A Metric for Comparing Symmetric Positive Definite Matrices},
      institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
      year        = {1995},
      abstract    = {The note proposes a metric for comparing symmetric positive definite matrices. Symmetric positive definite matrices can be interpreted as covariance matrices. The metric is shown to be invariant with respect to common affine transformations of the reference coordinate system of the covariance matrices and to inversions of the matrices, thus also measures the distance between the corresponding weight matrices.},
      city        = {Bonn},
    }

  • S. Winter, “Topological Relations between Discrete Regions,” in Advances in Spatial Databases Proc. of the Fourth International Symposium on Large Spatial Databases (SSD ’95), Portland, Maine, 1995, p. 310–327. doi:10.1007/3-540-60159-7_19
    [BibTeX] [PDF]

    Topological reasoning is important for speeding up spatial queries, e.g. in GIS or in AI (robotics). While topological relations between spatial objects in the vector model (R2) are investigated thoroughly, we run into inconsistencies in the raster model (Z2). But instead of reducing our requirements in case of reasoning in raster images we change from simple raster to a cellular decomposition of R2 – what we call a hyper-raster – which is also discrete, but preserves the topology of R2. The finite representation limits the computational effort against the vector model. We will introduce a data structure for the hyper-raster, which represents regions, curves and points. Then we will present algorithms for digitization and elementary image processing in hyper-raster. With that the intersection sets, as needed for the determination of a topological relation between two objects, are calculated simply by logical joins of binary images. Without extending our model we can also compute further refinements of the relationship. We will show that applying the 9-intersection, originally developed for the vector model, to the hyper-raster leads exactly to the set of known eight relations between regions without holes in R2.

    @inproceedings{winter1995topological,
      author    = {Winter, Stephan},
      title     = {Topological Relations between Discrete Regions},
      booktitle = {Advances in Spatial Databases Proc. of the Fourth International Symposium on Large Spatial Databases (SSD '95)},
      editor    = {Egenhofer, Max J. and Herring, John R.},
      series    = {Lecture Notes in Computer Science},
      volume    = {951},
      publisher = {Springer},
      year      = {1995},
      address   = {Portland, Maine},
      pages     = {310--327},
      abstract  = {Topological reasoning is important for speeding up spatial queries, e.g. in GIS or in AI (robotics). While topological relations between spatial objects in the vector model (R2) are investigated thoroughly, we run into inconsistencies in the raster model (Z2). But instead of reducing our requirements in case of reasoning in raster images we change from simple raster to a cellular decomposition of R2 - what we call a hyper-raster - which is also discrete, but preserves the topology of R2. The finite representation limits the computational effort against the vector model. We will introduce a data structure for the hyper-raster, which represents regions, curves and points. Then we will present algorithms for digitization and elementary image processing in hyper-raster. With that the intersection sets, as needed for the determination of a topological relation between two objects, are calculated simply by logical joins of binary images. Without extending our model we can also compute further refinements of the relationship. We will show that applying the 9-intersection, originally developed for the vector model, to the hyper-raster leads exactly to the set of known eight relations between regions without holes in R2.},
      city      = {Bonn},
      doi       = {10.1007/3-540-60159-7_19},
      proceeding = {Advances in Spatial Databases Proc. of the Fourth International Symposium on Large Spatial Databases (SSD 95)},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Winter1995Topological.pdf},
    }

  • S. Winter, “DT – Modul zur Distanztransformation,” Institute of Photogrammetry, University of Bonn 1995.
    [BibTeX] [PDF]
    [none]
    @techreport{winter1995dt,
      author      = {Winter, Stephan},
      title       = {DT - Modul zur Distanztransformation},
      institution = {Institute of Photogrammetry, University of Bonn},
      year        = {1995},
      abstract    = {[none]},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Winter1995DT.pdf},
    }

  • S. Winter, “Fourth Symposium on Large Spatial Databases (SSD 95),” Institute of Photogrammetry, University of Bonn 1995.
    [BibTeX] [PDF]
    [none]
    @techreport{winter1995fourth,
      author      = {Winter, Stephan},
      title       = {Fourth Symposium on Large Spatial Databases (SSD 95)},
      institution = {Institute of Photogrammetry, University of Bonn},
      year        = {1995},
      abstract    = {[none]},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Winter1995Fourth.pdf},
    }

  • M. Wydera, “Automatische Bestimmung von Fluchtpunkten,” Diplomarbeit Master Thesis, 1995.
    [BibTeX]
    [none]
    @mastersthesis{wydera1995automatische,
      author   = {Wydera, Markus},
      title    = {Automatische Bestimmung von Fluchtpunkten},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1995},
      type     = {Diplomarbeit},
      note     = {Betreuung: Dr.-Ing. Carola Braun, Prof. Dr.-Ing. Wolfgang F\"orstner},
      abstract = {[none]},
      city     = {Bonn},
    }

  • Second Course in Digital Photogrammetry, Bonn, 1995.
    [BibTeX]
    [none]
    @proceedings{forstner1995proc,
      editor   = {W. F\"orstner and G. Ditze},
      title    = {Second Course in Digital Photogrammetry},
      year     = {1995},
      address  = {Bonn},
      abstract = {[none]},
      city     = {Bonn},
    }

1994

  • C. Braun, T. H. Kolbe, F. Lang, W. Schickler, V. Steinhage, A. B. Cremers, W. Förstner, and L. Plümer, “Models for Photogrammetric Building Reconstruction,” in Computers & Graphics, 1994. doi:10.1016/0097-8493(94)00126-J
    [BibTeX] [PDF]

    The paper discusses the modeling necessary for recovering man made object – in this case buildings – in complex scenes from digital imagery. The approach addresses all levels of image analysis for deriving semantically meaningful descriptions of the scene from the image, via the geometrical / physical model of the objects and their counterparts in the image. The central link between raster image and scene are network-like organized aspects of parts of the objects. This is achieved by generically modelling the objects using parametrized volume primitives together with the application specific constraints, which seems to be adequate for many types of buildings. The paper sketches the various interrelationships between the different models and their use for feature extraction, hypothesis generation and verification.

    @article{braun1994models,
      author   = {Braun, Carola and Kolbe, Thomas H. and Lang, Felicitas and Schickler, Wolfgang and Steinhage, Volker and Cremers, Armin B. and F\"orstner, Wolfgang and Pl\"umer, Lutz},
      title    = {Models for Photogrammetric Building Reconstruction},
      journal  = {Computers \& Graphics},
      year     = {1994},
      abstract = {The paper discusses the modeling necessary for recovering man made object - in this case buildings - in complex scenes from digital imagery. The approach addresses all levels of image analysis for deriving semantically meaningful descriptions of the scene from the image, via the geometrical / physical model of the objects and their counterparts in the image. The central link between raster image and scene are network-like organized aspects of parts of the objects. This is achieved by generically modelling the objects using parametrized volume primitives together with the application specific constraints, which seems to be adequate for many types of buildings. The paper sketches the various interrelationships between the different models and their use for feature extraction, hypothesis generation and verification.},
      city     = {Bonn},
      doi      = {10.1016/0097-8493(94)00126-J},
      url      = {https://www.ipb.uni-bonn.de/pdfs/Braun1994Models.pdf},
    }

  • W. Förstner, “Diagnostics and Performance Evaluation in Computer Vision,” in Performance versus Methodology in Computer Vision, NSF/ARPA Workshop, Seattle, 1994, p. 11–25.
    [BibTeX] [PDF]

    Increasing the performance of Computer Vision algorithms requires both, robust procedures for handling non-modelled errors and diagnostic tools for achieving autonomy in the evaluation of the achieved results. The role of diagnostic tools for model evaluation and performance prediction is discussed. Quality or performance refers to: 1. the precision of the estimated quantities (efficiency) 2. the sensitivity of the estimated quantities with respect to systematic and gross errors. 3. the design of the experiment or the used actual data. 4. the correctness of results and of reports on the correctness of the result. The performance may be evaluated a. by controlled tests using simulated or real data. This is necessary to prove either the usefulness of the algorithms or the adequateness of the used model. b. by diagnostic tools. This is necessary for achieving autonomy in the chain of automatic procedures within a complete system where generally no reference data are available. The performance of Computer Vision algorithms can be significantly increased by diagnostic tools, both by detecting singular or weak configurations within high break down estimation, e. g. RANSAC, and by providing a highly reliable selfdiagnosis of the algorithm itself using the internally available redundancy. Results from extensive empirical tests demonstrate the feasibility of the proposed tools.

    @inproceedings{forstner1994diagnostics,
      author     = {F\"orstner, Wolfgang},
      title      = {Diagnostics and Performance Evaluation in Computer Vision},
      booktitle  = {Performance versus Methodology in Computer Vision, NSF/ARPA Workshop},
      year       = {1994},
      address    = {Seattle},
      pages      = {11--25},
      abstract   = {Increasing the performance of Computer Vision algorithms requires both, robust procedures for handling non-modelled errors and diagnostic tools for achieving autonomy in the evaluation of the achieved results. The role of diagnostic tools for model evaluation and performance prediction is discussed. Quality or performance refers to: 1. the precision of the estimated quantities (efficiency) 2. the sensitivity of the estimated quantities with respect to systematic and gross errors. 3. the design of the experiment or the used actual data. 4. the correctness of results and of reports on the correctness of the result. The performance may be evaluated a. by controlled tests using simulated or real data. This is necessary to prove either the usefulness of the algorithms or the adequateness of the used model. b. by diagnostic tools. This is necessary for achieving autonomy in the chain of automatic procedures within a complete system where generally no reference data are available. The performance of Computer Vision algorithms can be significantly increased by diagnostic tools, both by detecting singular or weak configurations within high break down estimation, e. g. RANSAC, and by providing a highly reliable selfdiagnosis of the algorithm itself using the internally available redundancy. Results from extensive empirical tests demonstrate the feasibility of the proposed tools.},
      city       = {Bonn},
      proceeding = {Performance versus Methodology in Computer Vision, NSF/ARPA Workshop},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1994Diagnostics.pdf},
    }

  • W. Förstner, “A Framework for Low-Level Feature Extraction,” in ECCV’94, 1994, p. 383–394. doi:10.1007/BFb0028370
    [BibTeX] [PDF]

    The paper presents a framework for extracting low level features, namely points, edges and segments from digital images. It is based on generic models for the scene, the sensing and the image. Its main goal is to explicitly exploit the information content of the image as far as possible. This leads to new techniques for deriving image parameters, to either the elimination or the elucidation of “buttons”, like thresholds, and to interpretable quality measures for the results, which may be used in subsequent steps. The feature extraction is based on local statistics of the image function, namely the average squared gradient and on the regularity of the image function with respect to junctions and circular symmetric features as special cases of Bigun’s (1990) spiral type features. Methods are provided for blind estimation of a signal dependent noise variance, for feature preserving restoration, for feature detection and classification and for the precise location of general edges and points. Their favorable scale space properties are discussed. In all steps thresholding and classification is based on proper test statistics reducing threshold selection to choosing a significance level.

    @inproceedings{forstner1994framework,
      author    = {F\"orstner, Wolfgang},
      title     = {A Framework for Low-Level Feature Extraction},
      booktitle = {ECCV'94},
      series    = {Lecture Notes in Computer Science},
      volume    = {801},
      year      = {1994},
      pages     = {383--394},
      abstract  = {The paper presents a framework for extracting low level features, namely points, edges and segments from digital images. It is based on generic models for the scene, the sensing and the image. Its main goal is to explicitly exploit the information content of the image as far as possible. This leads to new techniques for deriving image parameters, to either the elimination or the elucidation of "buttons", like thresholds, and to interpretable quality measures for the results, which may be used in subsequent steps. The feature extraction is based on local statistics of the image function, namely the average squared gradient and on the regularity of the image function with respect to junctions and circular symmetric features as special cases of Bigun's (1990) spiral type features. Methods are provided for blind estimation of a signal dependent noise variance, for feature preserving restoration, for feature detection and classification and for the precise location of general edges and points. Their favorable scale space properties are discussed. In all steps thresholding and classification is based on proper test statistics reducing threshold selection to choosing a significance level.},
      doi       = {10.1007/BFb0028370},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner1994Framework.pdf},
    }

  • W. Förstner, F. Lang, and C. Fuchs, “On the Noise and Scale Behaviour of Relational Descriptions,” in ISPRS Commission III, 1994.
    [BibTeX] [PDF]

    The paper presents a concept for analysing the quality of relational descriptions of digital images. The investigations are based on the relational description automatically derived by a new coherent procedure for feature extraction providing a feature adjacency graph containing points, edges and segments and their relations. A new notion of scale (integration scale) is introduced, relating to a non linear function of the image, providing new stable descriptions. Based on the feature extraction we analysed the quality of the relational descriptions in dependency on the signal-to-noise ratio and on the control parameters of the feature extraction process, i.~e. the significance level, the smoothing scale and the integration scale. First results on the quality of the features, focussing on their existence, distinct attributes and relations are given. The scope of this research is to predict the quality, especially probabilities of components of the relational description from a few measures depending on noise, scale and local properties of the image content. This is motivated by the applications we are dealing with, namely extracting man-made 2D or 3D structures by grouping procedures or image matching as both tasks are optimization problems where the probability of the unknown 2D or 3D model has to be maximized.

    @inproceedings{forstner1994noise,
      author    = {F\"orstner, Wolfgang and Lang, Felicitas and Fuchs, Claudia},
      title     = {On the Noise and Scale Behaviour of Relational Descriptions},
      booktitle = {ISPRS Commission III},
      year      = {1994},
      abstract  = {The paper presents a concept for analysing the quality of relational descriptions of digital images. The investigations are based on the relational description automatically derived by a new coherent procedure for feature extraction providing a feature adjacency graph containing points, edges and segments and their relations. A new notion of scale (integration scale) is introduced, relating to a non linear function of the image, providing new stable descriptions. Based on the feature extraction we analysed the quality of the relational descriptions in dependency on the signal-to-noise ratio and on the control parameters of the feature extraction process, i.~e. the significance level, the smoothing scale and the integration scale. First results on the quality of the features, focussing on their existence, distinct attributes and relations are given. The scope of this research is to predict the quality, especially probabilities of components of the relational description from a few measures depending on noise, scale and local properties of the image content. This is motivated by the applications we are dealing with, namely extracting man-made 2D or 3D structures by grouping procedures or image matching as both tasks are optimization problems where the probability of the unknown 2D or 3D model has to be maximized.},
      city      = {Bonn},
      url       = {https://www.ipb.uni-bonn.de/pdfs/Forstner1994Noise.pdf},
    }

  • W. Förstner and H. Pan, “Generalization of Linear Patterns Based on MDL Criterion,” Institut für Photogrammetrie, Universität Bonn 1994.
    [BibTeX] [PDF]

    A domain-independent objective mechanism is developed for generalization of linear patterns. It is based on the Minimum-Description-Length principle, seeking the simplest description of a given polyline. The hypotheses are generated by the farthest point algorithm. The whole mechanism is objective in the sense of without using any control parameter. This mechanism has been tested on segmented images and polygon maps.

    @techreport{forstner1994generalization,
      author      = {F\"orstner, Wolfgang and Pan, He-Ping},
      title       = {Generalization of Linear Patterns Based on MDL Criterion},
      institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
      year        = {1994},
      abstract    = {A domain-independent objective mechanism is developed for generalization of linear patterns. It is based on the Minimum-Description-Length principle, seeking the simplest description of a given polyline. The hypotheses are generated by the farthest point algorithm. The whole mechanism is objective in the sense of without using any control parameter. This mechanism has been tested on segmented images and polygon maps.},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Forstner1994Generalization.pdf},
    }

  • W. Förstner and J. Shao, “Gabor Wavelets for Texture Edge Extraction,” in ISPRS Commission III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision, Munich, Germany, 1994.
    [BibTeX] [PDF]

    Textures in images have a natural order, both in orientation and multiple narrow-band frequency, which requires to employ multichannel local spatial/frequency filtering and orientation selectivity, and to have a multiscale characteristic. Each channel covers one part of whole frequency domain, which indicates different information for the different texton. Gabor filter, as a near orthogonal wavelet used in this paper, has orientation selectivity, multiscale property, linear phase and good localization both in spatial and frequency domains, which are suitable for texture analysis. Gabor filters are employed for clustering the similarity of same type of textons. Gaussian filters are also used for detection of normal image edges. Then hybrid texture and nontexture gradient measurement is based on fusion of the difference of amplitude of the filter responses between Gabor and Gaussian filters at neighboring pixels by mainly using average squared gradient. Normalization, based on the noise response and based on maximum response are computed.

    @inproceedings{forstner1994gabor,
      author     = {F\"orstner, Wolfgang and Shao, Juliang},
      title      = {Gabor Wavelets for Texture Edge Extraction},
      booktitle  = {ISPRS Commission III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
      year       = {1994},
      address    = {Munich, Germany},
      abstract   = {Textures in images have a natural order, both in orientation and multiple narrow-band frequency, which requires to employ multichannel local spatial/frequency filtering and orientation selectivity, and to have a multiscale characteristic. Each channel covers one part of whole frequency domain, which indicates different information for the different texton. Gabor filter, as a near orthogonal wavelet used in this paper, has orientation selectivity, multiscale property, linear phase and good localization both in spatial and frequency domains, which are suitable for texture analysis. Gabor filters are employed for clustering the similarity of same type of textons. Gaussian filters are also used for detection of normal image edges. Then hybrid texture and nontexture gradient measurement is based on fusion of the difference of amplitude of the filter responses between Gabor and Gaussian filters at neighboring pixels by mainly using average squared gradient. Normalization, based on the noise response and based on maximum response are computed.},
      city       = {Bonn},
      proceeding = {ISPRS Commission III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1994Gabor.pdf},
    }

  • A. Kosarek, “Analyse ausgezeichneter Lagebeziehungen geometrischer Grundelemente,” Diplomarbeit Master Thesis, 1994.
    [BibTeX]
    [none]
    @mastersthesis{kosarek1994analyse,
      author   = {Kosarek, Armin},
      title    = {Analyse ausgezeichneter Lagebeziehungen geometrischer Grundelemente},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1994},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. F\"orstner, Dipl.-Ing. Gisela Ditze},
      abstract = {[none]},
      city     = {Bonn},
    }

  • T. Löcherbach, “Fusion of Multi-Sensor Images and digital Map Data for the Reconstruction and Interpretation of Agricultural Land-use Units,” in ISPRS Commission III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision, Munich, Germany, 1994.
    [BibTeX] [PDF]

    The paper describes an approach to reconstructing agricultural land-use areas from remotely sensed images using digital polygon maps as prior information. Our goal is to update the geometry and class label of agricultural parcels. The approach integrates an estimation of the vector polygons and the derivation of object (i.e. land-use unit) related features for the determination of the object classes. Both a feature edge model for the transition between two land-use units and the assumption of homogeneity are used to reconstruct the land-use units in a least squares approach. The theoretical concept and its technical realization are described, first results are presented and a critical evaluation of the results with a discussion of possible extension is given.

    @inproceedings{locherbach1994fusion,
      author     = {L\"ocherbach, Thomas},
      title      = {Fusion of Multi-Sensor Images and digital Map Data for the Reconstruction and Interpretation of Agricultural Land-use Units},
      booktitle  = {ISPRS Commission III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
      year       = {1994},
      address    = {Munich, Germany},
      abstract   = {The paper describes an approach to reconstructing agricultural land-use areas from remotely sensed images using digital polygon maps as prior information. Our goal is to update the geometry and class label of agricultural parcels. The approach integrates an estimation of the vector polygons and the derivation of object (i.e. land-use unit) related features for the determination of the object classes. Both a feature edge model for the transition between two land-use units and the assumption of homogeneity are used to reconstruct the land-use units in a least squares approach. The theoretical concept and its technical realization are described, first results are presented and a critical evaluation of the results with a discussion of possible extension is given.},
      city       = {Bonn},
      proceeding = {ISPRS Commission III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Locherbach1994Fusion.pdf},
    }

  • H. Lühring, “Radiometrische Kalibrierung der CCD-Kamera am Analytischen Auswertegerät Planicomp P 3,” Diplomarbeit Master Thesis, 1994.
    [BibTeX]
    [none]
    @mastersthesis{luhring1994radiometrische,
      author   = {L\"uhring, Hajo},
      title    = {Radiometrische Kalibrierung der CCD-Kamera am Analytischen Auswerteger\"at Planicomp P 3},
      school   = {Institute of Photogrammetry, University of Bonn},
      year     = {1994},
      type     = {Diplomarbeit},
      note     = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Claudia Fuchs},
      abstract = {[none]},
      city     = {Bonn},
    }

  • H. Pan, “Two-Level Global Optimization for Image Segmentation,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 49, iss. 2, p. 21–32, 1994. doi:10.1016/0924-2716(94)90063-9
    [BibTeX]

    Domain-independent image segmentation is considered here as a global optimization problem: to seek the simplest description of a given input image in terms of coherent closed regions. The approach consists of two levels of processing: pixel-level and region-level, both based on the Minimum-Description-Length principle. Pixel-level processing leads to forming the atomic regions that are then labelled. In region-level processing neighbouring regions are merged into larger ones using an explicit attributed graph evolution mechanism. Both level processings are stopped automatically without using any heuristic control parameters. Experiments are carried out with a number of images of different scene types. Parallel implementation of region-level processing is the most difficult problem to be solved for the operational application of this approach.

    @article{pan1994two,
      author   = {Pan, He-Ping},
      title    = {Two-Level Global Optimization for Image Segmentation},
      journal  = {ISPRS Journal of Photogrammetry and Remote Sensing},
      year     = {1994},
      volume   = {49},
      number   = {2},
      pages    = {21--32},
      abstract = {Domain-independent image segmentation is considered here as a global optimization problem: to seek the simplest description of a given input image in terms of coherent closed regions. The approach consists of two levels of processing: pixel-level and region-level, both based on the Minimum-Description-Length principle. Pixel-level processing leads to forming the atomic regions that are then labelled. In region-level processing neighbouring regions are merged into larger ones using an explicit attributed graph evolution mechanism. Both level processings are stopped automatically without using any heuristic control parameters. Experiments are carried out with a number of images of different scene types. Parallel implementation of region-level processing is the most difficult problem to be solved for the operational application of this approach.},
      city     = {Bonn},
      doi      = {10.1016/0924-2716(94)90063-9},
    }

  • U. Weidner, “Parameterfree Information-Preserving Surface Restoration,” in Computer Vision ECCV, 1994. doi:10.1007/BFb0028355
    [BibTeX] [PDF]

    In this paper we present an algorithm for parameterfree information-preserving surface restoration. The algorithm is designed for 2.5D and 3D surfaces. The basic idea is to extract noise and signal properties of the data simultaneously by variance-component estimation and use this information for filtering. The variance-component estimation delivers information on how to weigh the influence of the data dependent term and the stabilizing term in regularization techniques, and therefore no parameter which controls this relation has to be set by the user. The paper is the original contribution for ECCV’94 (7 pages) as it is published by Springer.

    @inproceedings{weidner1994parameterfree,
      author     = {Weidner, Uwe},
      title      = {Parameterfree Information-Preserving Surface Restoration},
      booktitle  = {Computer Vision ECCV},
      year       = {1994},
      editor     = {Eklundh, Jan-Olof},
      abstract   = {In this paper we present an algorithm for parameterfree information-preserving surface restoration. The algorithm is designed for 2.5D and 3D surfaces. The basic idea is to extract noise and signal properties of the data simultaneously by variance-component estimation and use this information for filtering. The variance-component estimation delivers information on how to weigh the influence of the data dependent term and the stabilizing term in regularization techniques, and therefore no parameter which controls this relation has to be set by the user. The paper is the original contribution for ECCV'94 (7 pages) as it is published by Springer.},
      city       = {Bonn},
      doi        = {10.1007/BFb0028355},
      proceeding = {Computer Vision ECCV},
      url        = {https://www.ipb.uni-bonn.de/pdfs/Weidner1994Parameterfree.pdf},
    }

  • U. Weidner, “GEO – Algorithmen der Digitalen Bildverarbeitung,” Institut für Photogrammetrie, Universität Bonn 1994.
    [BibTeX] [PDF]

    Kurzdokumentation über die Handhabung des Programms geo. Weitergehende Informationen können aus Best (1990) und Bevacqua/Floris (1987) entnommen werden.

    @techreport{weidner1994geo,
      author      = {Weidner, Uwe},
      title       = {GEO - Algorithmen der Digitalen Bildverarbeitung},
      institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
      year        = {1994},
      abstract    = {Kurzdokumentation \"uber die Handhabung des Programms geo. Weitergehende Informationen k\"onnen aus Best (1990) und Bevacqua/Floris (1987) entnommen werden.},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Weidner1994GEO.pdf},
    }

  • U. Weidner, “GRINV – Gradient Inverse Weighted Smoothing,” Institut für Photogrammetrie, Universität Bonn 1994.
    [BibTeX] [PDF]

    Kurzdokumentation über die Handhabung des Programms grinv. Weitergehende Informationen können aus Wang et al. (1981) entnommen werden.

    @techreport{weidner1994grinv,
      author      = {Weidner, Uwe},
      title       = {GRINV - Gradient Inverse Weighted Smoothing},
      institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
      year        = {1994},
      abstract    = {Kurzdokumentation \"uber die Handhabung des Programms grinv. Weitergehende Informationen k\"onnen aus Wang et al. (1981) entnommen werden.},
      city        = {Bonn},
      url         = {https://www.ipb.uni-bonn.de/pdfs/Weidner1994GRINV.pdf},
    }

  • U. Weidner, “Information-Preserving Surface Restoration And Feature Extraction For Digital Elevation Models,” in Proc. of ISPRS Comm. III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision, München, Germany, 1994. doi:10.1117/12.182917
    [BibTeX] [PDF]

    Pre-processing such as filtering data in order to remove or at least reduce noise is a crucial step because information which is lost during this filtering cannot be recovered in subsequent steps. It is a well-known fact, that linear filtering does not only reduce noise, but may also lead to a loss of information due to the global smoothing, regardless of structures in the data. In order to overcome these drawbacks, we propose to use an algorithm for parameterfree information-preserving surface restoration. As we do not want to evaluate the results of the filtering only qualitatively by visual inspection, we examine the influence of pre-processing on feature extraction for digital elevation models and discuss quantities for the evaluation of these influences.

    @InProceedings{weidner1994information,
    title = {Information-Preserving Surface Restoration And Feature Extraction For Digital Elevation Models},
    author = {Weidner, Uwe},
    booktitle = {Proc. of ISPRS Comm. III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
    year = {1994},
    address = {M\"unchen, Germany},
    abstract = {Pre-processing such as filtering data in order to remove or at least reduce noise is a crucial step because information which is lost during this filtering cannot be recovered in subsequent steps. It is a well-known fact, that linear filtering does not only reduce noise, but may also lead to a loss of information due to the global smoothing, regardless of structures in the data. In order to overcome these drawbacks, we propose to use an algorithm for parameterfree information-preserving surface restoration. As we do not want to evaluate the results of the filtering only qualitatively by visual inspection, we examine the influence of pre-processing on feature extraction for digital elevation models and discuss quantities for the evaluation of these influences.},
    city = {Bonn},
    doi = {10.1117/12.182917},
    proceeding = {Proc. of ISPRS Comm. III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
    url = {https://www.ipb.uni-bonn.de/pdfs/Weidner1994Information.pdf},
    }

  • U. Weidner, “REKO – Bildrekonstruktion mittels informationserhaltender Filterung,” Institut für Photogrammetrie, Universität Bonn 1994.
    [BibTeX] [PDF]

    Kurzdokumentation über die Handhabung des Programms reko. Weitergehende Informationen können aus Weidner (1991) entnommen werden.

    @TechReport{weidner1994reko,
    title = {REKO - Bildrekonstruktion mittels informationserhaltender Filterung},
    author = {Weidner, Uwe},
    institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
    year = {1994},
    abstract = {Kurzdokumentation \"uber die Handhabung des Programms reko. Weitergehende Informationen k\"onnen aus Weidner (1991) entnommen werden.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Weidner1994REKO.pdf},
    }

  • U. Weidner, “VISTRI – Visualisierung von Flächen,” Institut für Photogrammetrie, Universität Bonn 1994.
    [BibTeX] [PDF]

    Kurzdokumentation über die Handhabung des Programms vistri.

    @TechReport{weidner1994vistri,
    title = {VISTRI - Visualisierung von Fl\"achen},
    author = {Weidner, Uwe},
    institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
    year = {1994},
    abstract = {Kurzdokumentation \"uber die Handhabung des Programms vistri.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Weidner1994VISTRI.pdf},
    }

  • S. Winter, “Bericht über die AGDM 94,” Institut für Photogrammetrie, Universität Bonn 1994.
    [BibTeX] [PDF]
    [none]
    @TechReport{winter1994bericht,
    title = {Bericht \"uber die AGDM 94},
    author = {Winter, Stephan},
    institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
    year = {1994},
    abstract = {[none]},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Winter1994Bericht.pdf},
    }

  • S. Winter, “Uncertainty in Topological Relationships in GIS,” in Proc. of ISPRS Comm. III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision, München, 1994.
    [BibTeX] [PDF]

    We present a concept for representing uncertain topological relations and their derivation from uncertain sets, useful for spatial and temporal reasoning in GIS. The concept is based on the notion of a stochastic boundary of a geometric set and on tests performed to decide the validity of relations between the sets. It uses the power-function to derive the probabilities of the found relations. The concept is applicable to all questions where uncertain geometric queries or analysis have to be performed.

    @InProceedings{winter1994uncertainty,
    title = {Uncertainty in Topological Relationships in GIS},
    author = {Winter, Stephan},
    booktitle = {Proc. of ISPRS Comm. III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
    year = {1994},
    address = {M\"unchen},
    abstract = {We present a concept for representing uncertain topological relations and their derivation from uncertain sets, useful for spatial and temporal reasoning in GIS. The concept is based on the notion of a stochastic boundary of a geometric set and on tests performed to decide the validity of relations between the sets. It uses the power-function to derive the probabilities of the found relations. The concept is applicable to all questions where uncertain geometric queries or analysis have to be performed.},
    city = {Bonn},
    proceeding = {Proc. of ISPRS Comm. III Symposium on Spatial Information from Digital Photogrammetry and Computer Vision},
    url = {https://www.ipb.uni-bonn.de/pdfs/Winter1994Uncertainty.pdf},
    }

  • C. Wittich, “Interaktive Extraktion von Satteldächern aus Digitalen Luftbildern,” Diplomarbeit Master Thesis, 1994.
    [BibTeX]
    [none]
    @MastersThesis{wittich1994interaktive,
    title = {Interaktive Extraktion von Satteld\"achern aus Digitalen Luftbildern},
    author = {Wittich, Christian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1994},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Gisela Ditze},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • B. Zöller, “Untersuchungen zur phototreuen Animation von Gebäuden,” Diplomarbeit Master Thesis, 1994.
    [BibTeX]
    [none]
    @MastersThesis{zoller1994untersuchungen,
    title = {Untersuchungen zur phototreuen Animation von Geb\"auden},
    author = {Z\"oller, Bruno},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1994},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Wolfgang Schickler},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

1993

  • K. Boeck, “Untersuchung zur phototreuen Visualisierung von Gebäuden,” Diplomarbeit Master Thesis, 1993.
    [BibTeX]
    [none]
    @MastersThesis{boeck1993untersuchung,
    title = {Untersuchung zur phototreuen Visualisierung von Geb\"auden},
    author = {Boeck, Katrin},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1993},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Wolfgang Schickler},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • K. Diemer, “Verschmelzung partieller 3D-Modelle von Polyedern,” Diplomarbeit Master Thesis, 1993.
    [BibTeX]
    [none]
    @MastersThesis{diemer1993verschmelzung,
    title = {Verschmelzung partieller 3D-Modelle von Polyedern},
    author = {Diemer, Klaus},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1993},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Carola Braun},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • W. Förstner, “A Future of Photogrammetic Research,” Geodesia, vol. 8, pp. 372-383, 1993.
    [BibTeX] [PDF]

    The computer’s access to the image data requires a new definition of photogrammetric tasks. The geometric Modelling in aerial triangulation and the physical modelling in remote sensing has to be embedded in a semantic modelling of the objects to be extracted from aerial and satellite images. This article wants to stress the urgent need for the development of a new theoretical basis and sketch some research issues in image interpretation being the key issue for a scientific evolution of photogrammetry.

    @Article{foerstner1993,
    title = {A Future of Photogrammetic Research},
    author = {F\"orstner, Wolfgang},
    journal = {Geodesia},
    year = {1993},
    pages = {372--383},
    volume = {8},
    abstract = {The computer's access to the image data requires a new definition of photogrammetric tasks. The geometric Modelling in aerial triangulation and the physical modelling in remote sensing has to be embedded in a semantic modelling of the objects to be extracted from aerial and satellite images. This article wants to stress the urgent need for the development of a new theoretical basis and sketch some research issues in image interpretation being the key issue for a scientific evolution of photogrammetry.},
    timestamp = {2014.10.23},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner93afuture.pdf},
    internal-note = {NOTE(review): author was "F\"oerstner" (double-encoded umlaut) -- corrected to F\"orstner. Title spelling "Photogrammetic" kept as in source metadata; verify ("Photogrammetric"?) against the original publication.},
    }

  • W. Förstner, “Image Matching,” in Computer and Robot Vision, 1993, p. 289–379.
    [BibTeX] [PDF]
    [none]
    @InCollection{forstner1993image,
    title = {Image Matching},
    author = {F\"orstner, Wolfgang},
    booktitle = {Computer and Robot Vision},
    year = {1993},
    editor = {Haralick, R. M. and Shapiro, L. G.},
    pages = {289--379},
    abstract = {[none]},
    city = {Bonn},
    proceeding = {Computer and Robot Vision},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1993Image.pdf},
    }

  • W. Förstner, “Feature Extraction in Digital Photogrammetry,” Photogrammetric Record, vol. 14, iss. 82, pp. 595-611, 1993.
    [BibTeX] [PDF]
    @Article{foerstner93feature,
    title = {{Feature Extraction in Digital Photogrammetry}},
    author = {F\"orstner, W.},
    journal = {Photogrammetric Record},
    year = {1993},
    number = {82},
    pages = {595--611},
    volume = {14},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner93Feature.pdf},
    }

  • A. Hertel, “Automatisierung der Objektrekonstruktion aus Bildern,” Diplomarbeit Master Thesis, 1993.
    [BibTeX]
    [none]
    @MastersThesis{hertel1993automatisierung,
    title = {Automatisierung der Objektrekonstruktion aus Bildern},
    author = {Hertel, Antje},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1993},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Gisela Ditze},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • A. Klennert, “Untersuchungen zum Verfahren der anisotropen Diffusion,” Diplomarbeit Master Thesis, 1993.
    [BibTeX]
    [none]
    @MastersThesis{klennert1993untersuchungen,
    title = {Untersuchungen zum Verfahren der anisotropen Diffusion},
    author = {Klennert, Antje},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1993},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Uwe Weidner},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • H. Knaff, “Extraktion morphologisch relevanter Informationen aus Digitalen Höhenmodellen,” Diplomarbeit Master Thesis, 1993.
    [BibTeX]
    [none]
    @MastersThesis{knaff1993extraktion,
    title = {Extraktion morphologisch relevanter Informationen aus Digitalen H\"ohenmodellen},
    author = {Knaff, Henri},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1993},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Uwe Weidner},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • F. Lang and W. Schickler, “Semiautomatische 3D Gebäudeerfassung aus digitalen Bildern,” Zeitschrift für Photogrammetrie und Fernerkundung, iss. 5, p. 193–200, 1993.
    [BibTeX]

    The paper presents a concept for the model based threedimensional acquisition of buildings from single and multiple images. For solving the interpretation problem an automatic building extraction procedure requires an apropriate object model. We use parametrized models implicitely representing inherent geometric and topological contraints. The semiautomatic procedure supports the operator during the mensuration process and the final fit of the models to the image data. The implementation and first results are reported.

    @Article{lang1993semiautomatische,
    title = {Semiautomatische 3D Geb\"audeerfassung aus digitalen Bildern},
    author = {Lang, Felicitas and Schickler, Wolfgang},
    journal = {Zeitschrift f\"ur Photogrammetrie und Fernerkundung},
    year = {1993},
    number = {5},
    pages = {193--200},
    abstract = {The paper presents a concept for the model based threedimensional acquisition of buildings from single and multiple images. For solving the interpretation problem an automatic building extraction procedure requires an apropriate object model. We use parametrized models implicitely representing inherent geometric and topological contraints. The semiautomatic procedure supports the operator during the mensuration process and the final fit of the models to the image data. The implementation and first results are reported.},
    city = {Bonn},
    }

  • C. Toma, “Robuste Cluster-Analyse von Digitalbildern,” Diplomarbeit Master Thesis, 1993.
    [BibTeX]
    [none]
    @MastersThesis{toma1993robuste,
    title = {Robuste Cluster-Analyse von Digitalbildern},
    author = {Toma, Christian},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1993},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Thomas L\"ocherbach},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • U. Weidner, “Krümmungsmaße,” Institut für Photogrammetrie, Universität Bonn 1993.
    [BibTeX] [PDF]

    Dieses Arbeitspapier stellt differentialgeometrische Grundlagen in Tensorschreibweise dar und zeigt Möglichkeiten zur Berechnung von Krümmungsmaßen für durch diskrete Punkte gegebene Flächen auf. Weiterhin werden Möglichkeiten zur Segmentierung von Flächen auf der Basis dieser Krümmungsmaße diskutiert.

    @TechReport{weidner1993krummungsmae,
    title = {Kr\"ummungsma{\ss}e},
    author = {Weidner, Uwe},
    institution = {Institut f\"ur Photogrammetrie, Universit\"at Bonn},
    year = {1993},
    abstract = {Dieses Arbeitspapier stellt differentialgeometrische Grundlagen in Tensorschreibweise dar und zeigt M\"oglichkeiten zur Berechnung von Kr\"ummungsma{\ss}en f\"ur durch diskrete Punkte gegebene Fl\"achen auf. Weiterhin werden M\"oglichkeiten zur Segmentierung von Fl\"achen auf der Basis dieser Kr\"ummungsma{\ss}e diskutiert.},
    city = {Bonn},
    url = {https://www.ipb.uni-bonn.de/pdfs/Weidner1993Krummungsmae.pdf},
    }

1992

  • A. Block and T. Otte, “Gruppierung von Bildprimitiven in digitalen Bildern,” Diplomarbeit Master Thesis, 1992.
    [BibTeX]
    [none]
    @MastersThesis{block1992gruppierung,
    title = {Gruppierung von Bildprimitiven in digitalen Bildern},
    author = {Block, Achim and Otte, Thomas},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1992},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Claudia Fuchs},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • R. Brügelmann and W. Förstner, “Noise Estimation for Color Edge Extraction,” in Robust Computer Vision, Karlsruhe, 1992, p. 90–107.
    [BibTeX] [PDF]

    This paper discusses an automatic procedure for color edge extraction. It contains a procedure for robustly estimating the signal dependent components ofthe noise which is assumed to be influenced mainly by the Poisson statistics of the photon radiance. This allow to mutually weight different channels of a multispectral images. Except for a significance level no other thresholds are required.

    @InProceedings{brugelmann1992noise,
    title = {Noise Estimation for Color Edge Extraction},
    author = {Br\"ugelmann, Regina and F\"orstner, Wolfgang},
    booktitle = {Robust Computer Vision},
    year = {1992},
    address = {Karlsruhe},
    editor = {F\"orstner, Wolfgang and Winter, Stephan},
    pages = {90--107},
    publisher = {Wichmann},
    abstract = {This paper discusses an automatic procedure for color edge extraction. It contains a procedure for robustly estimating the signal dependent components ofthe noise which is assumed to be influenced mainly by the Poisson statistics of the photon radiance. This allow to mutually weight different channels of a multispectral images. Except for a significance level no other thresholds are required.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Brugelmann1992Noise.pdf},
    }

  • T. Löcherbach, “Reconstruction of Land-Use Units …,” in Proc. of the IAPR-TC7 Workshop on Multisource Data Integration in Remote Sensing with Respect to Land Inventory Applications, Delft, The Netherlands, 1992, p. 95–112.
    [BibTeX]
    [none]
    @InProceedings{locherbach1992reconstruction,
    title = {Reconstruction of Land-Use Units ...},
    author = {L\"ocherbach, Thomas},
    booktitle = {Proc. of the IAPR-TC7 Workshop on Multisource Data Integration in Remote Sensing with Respect to Land Inventory Applications},
    year = {1992},
    address = {Delft, The Netherlands},
    pages = {95--112},
    abstract = {[none]},
    }

  • M. Lincke, “Optimierung eines photogrammetrischen Netzes,” Diplomarbeit Master Thesis, 1992.
    [BibTeX]
    [none]
    @MastersThesis{lincke1992optimierung,
    title = {Optimierung eines photogrammetrischen Netzes},
    author = {Lincke, Matthias},
    school = {Institute of Photogrammetry, University of Bonn In Zusammenarbeit mit der Volkswagen AG Wolfsburg},
    year = {1992},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. W. Riechmann},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • G. Peter, “Entwicklung eines Algorithmus zum Parsen von Parzellenverbänden,” Diplomarbeit Master Thesis, 1992.
    [BibTeX]
    [none]
    @MastersThesis{peter1992entwicklung,
    title = {Entwicklung eines Algorithmus zum Parsen von Parzellenverb\"anden},
    author = {Peter, Georg},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1992},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Claudia Fuchs},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • B. Treutler, “Verfahren zur Rekonstruktion von Knotenpunktbereichen in Linienbildern,” Diplomarbeit Master Thesis, 1992.
    [BibTeX]
    [none]
    @MastersThesis{treutler1992verfahren,
    title = {Verfahren zur Rekonstruktion von Knotenpunktbereichen in Linienbildern},
    author = {Treutler, Bernhard},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1992},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Carola Braun},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

1991

  • W. Förstner, “Statistische Verfahren für die automatische Bildanalyse und ihre Bewertung bei der Objekterkennung und -vermessung,” in Deutsche Geodätische Kommission bei der Bayerischen Akademie der Wissenschaften, 1991.
    [BibTeX] [PDF]
    [none]
    @InProceedings{forstner1991statistische,
    title = {Statistische Verfahren f\"ur die automatische Bildanalyse und ihre Bewertung bei der Objekterkennung und -vermessung},
    author = {F\"orstner, Wolfgang},
    booktitle = {Deutsche Geod\"atische Kommission bei der Bayerischen Akademie der Wissenschaften},
    year = {1991},
    abstract = {[none]},
    city = {M\"unchen},
    proceeding = {DGK},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1991Statistische.pdf},
    }

  • G. Jarczyk, “Untersuchungen zur Texturanalyse,” Diplomarbeit Master Thesis, 1991.
    [BibTeX]
    [none]
    @MastersThesis{jarczyk1991untersuchungen,
    title = {Untersuchungen zur Texturanalyse},
    author = {Jarczyk, Gregor},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1991},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Claudia Fuchs},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • H. Schindzielorz, “Rekonstruktion von Polyedern aus einer perspektiven Skizze,” Diplomarbeit Master Thesis, 1991.
    [BibTeX]
    [none]
    @MastersThesis{schindzielorz1991rekonstruktion,
    title = {Rekonstruktion von Polyedern aus einer perspektiven Skizze},
    author = {Schindzielorz, Heinz},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1991},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

1990

  • A. Cuda, “Untersuchung des Konvergenzbereiches des räumlichen Rückwärtsschnittes nach Hinsken,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{cuda1990untersuchung,
    title = {Untersuchung des Konvergenzbereiches des r\"aumlichen R\"uckw\"artsschnittes nach Hinsken},
    author = {Cuda, Albert},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Dr.-Ing. Karl-Heiko Ellenbeck, Prof. Dr.-Ing. Wolfgang F\"orstner},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • V. Döring, “Datenstrukturen und Basisalgorithmen für die Verarbeitung von Flächen,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{doring1990datenstrukturen,
    title = {Datenstrukturen und Basisalgorithmen f\"ur die Verarbeitung von Fl\"achen},
    author = {D\"oring, Volkmar},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. Uwe Tempelmann},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • C. Glock, “Erkennung von Symmetrien in Skizzen,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{glock1990erkennung,
    title = {Erkennung von Symmetrien in Skizzen},
    author = {Glock, Christof},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • T. Hau, “Morphologische Glättung von Linien,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{hau1990morphologische,
    title = {Morphologische Gl\"attung von Linien},
    author = {Hau, Thomas},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • H. J. Jäger and A. Miggelt, “Entwicklung und Untersuchung eines Programms zur aktiven Linienmessung in digitalen Bildern,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{jager1990entwicklung,
    title = {Entwicklung und Untersuchung eines Programms zur aktiven Linienmessung in digitalen Bildern},
    author = {J\"ager, Hermann J. and Miggelt, Andreas},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • M. Küpper, “Ermittelung dreidimensionaler Modellkoordinaten von Objekten mit dem Verfahren der Epipolarebenenbild-Analyse,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{kupper1990ermittelung,
    title = {Ermittelung dreidimensionaler Modellkoordinaten von Objekten mit dem Verfahren der Epipolarebenenbild-Analyse},
    author = {K\"upper, Martin},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • B. Reisch and T. Vreden, “Automatische Messung des Mittelpunktes von Réseaukreuzen und ellipsenförmigen Targets in digitalen Bildausschnitten,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{reisch1990automatische,
    title = {Automatische Messung des Mittelpunktes von R\'eseaukreuzen und ellipsenf\"ormigen Targets in digitalen Bildausschnitten},
    author = {Reisch, Bernard and Vreden, Tobias},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • U. Weidner, “Entwicklung eines Verfahrens zur informationserhaltenen Filterung digitaler Bilder,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{weidner1990entwicklung,
    title = {Entwicklung eines Verfahrens zur informationserhaltenen Filterung digitaler Bilder},
    author = {Weidner, Uwe},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

  • A. Willinghöfer and D. Wolff, “Extraktion einer symbolischen Bildbeschreibung aus digitalen Bildern,” Diplomarbeit Master Thesis, 1990.
    [BibTeX]
    [none]
    @MastersThesis{willinghofer1990extraktion,
    title = {Extraktion einer symbolischen Bildbeschreibung aus digitalen Bildern},
    author = {Willingh\"ofer, Axel and Wolff, Detlef},
    school = {Institute of Photogrammetry, University of Bonn},
    year = {1990},
    note = {Betreuung: Prof. Dr.-Ing. Wolfgang F\"orstner, Dipl.-Ing. C. Peters},
    type = {Diplomarbeit},
    abstract = {[none]},
    city = {Bonn},
    }

1989

  • W. Förstner, “Precision of Geometric Features derived from Image Sequences,” in Proc. of an International Workshop “High Precision Navigation: Integration of Navigational and Geodetic Methods”, Stuttgart and Altensteig, 1989, p. 313–329. doi:10.1007/978-3-642-74585-0_23
    [BibTeX] [PDF]

    The paper discusses the accuracy potential of mono and stereo image sequences. Specifically treated are the effect of image blur onto the precision of image features, the precision obtainable for relative position, orientation and speed of the sensor platform with respect to a given coordinate frame or to other moving objects. The results can be used for designing a multisensor navigation system.

    @InProceedings{forstner1989precision,
    title = {Precision of Geometric Features derived from Image Sequences},
    author = {F\"orstner, Wolfgang},
    booktitle = {Proc. of an International Workshop ``High Precision Navigation: Integration of Navigational and Geodetic Methods''},
    year = {1989},
    address = {Stuttgart and Altensteig},
    editor = {Linkwitz, K. and Hangleiter, U.},
    pages = {313--329},
    abstract = {The paper discusses the accuracy potential of mono and stereo image sequences. Specifically treated are the effect of image blur onto the precision of image features, the precision obtainable for relative position, orientation and speed of the sensor platform with respect to a given coordinate frame or to other moving objects. The results can be used for designing a multisensor navigation system.},
    doi = {10.1007/978-3-642-74585-0_23},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1989Precision.pdf},
    }

  • W. Förstner, “Image Analysis Techniques for Digital Photogrammetry,” in Photogrammetrische Woche, Stuttgart, 1989, p. 205–221.
    [BibTeX] [PDF]
    [none]
    @InProceedings{forstner1989image,
    title = {Image Analysis Techniques for Digital Photogrammetry},
    author = {F\"orstner, Wolfgang},
    booktitle = {Photogrammetrische Woche},
    year = {1989},
    address = {Stuttgart},
    pages = {205--221},
    abstract = {[none]},
    city = {Bonn},
    proceeding = {Photogrammetrische Woche 1989},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1989Image.pdf},
    }

  • W. Förstner and M. Sester, “Object Location Based on Uncertain Models,” in Mustererkennung 1989, 11. DAGM-Symposium, Hamburg, 1989, p. 457–464.
    [BibTeX] [PDF]

    The paper describes a concept for object location, when not only image features but also the model description is uncertain. It contains a method for probabilistic clustering, robust estimation and a measure for evaluating both, inaccurate and missing image features. The location of topographic control points in digitized aerial images demonstrates the feasibility of the procedure and the usefullness of the evaluation criteria.

    @InProceedings{forstner1989object,
    title = {Object Location Based on Uncertain Models},
    author = {F\"orstner, Wolfgang and Sester, Monika},
    booktitle = {Mustererkennung 1989, 11. DAGM-Symposium},
    year = {1989},
    address = {Hamburg},
    editor = {Burkhardt, H. and H\"ohne, K. H. and Neumann, B.},
    pages = {457--464},
    abstract = {The paper describes a concept for object location, when not only image features but also the model description is uncertain. It contains a method for probabilistic clustering, robust estimation and a measure for evaluating both, inaccurate and missing image features. The location of topographic control points in digitized aerial images demonstrates the feasibility of the procedure and the usefullness of the evaluation criteria.},
    city = {Bonn},
    proceeding = {Mustererkennung 1989, 11. DAGM-Symposium},
    internal-note = {NOTE(review): editor initials were shifted (Burkhardt, K. H. / H\"ohne, B.); DAGM 1989 proceedings list H. Burkhardt, K. H. H\"ohne, B. Neumann -- corrected.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1989Object.pdf},
    }

1988

  • W. Förstner and G. Vosselmann, “The Precision of a Digital Camera,” in ISPRS 16th Congress, Kyoto, 1988, p. 148–157.
    [BibTeX] [PDF]

    A testfield containing a large number of black targets on a white background has been recorded by a digital camera from many points of view. In each digital image, the targets have been located using elementary image processing techniques. Precise coordinates were obtained by matching the targets with artificial masks. The precision of these coordinates was calculated in a bundle block adjustment with self-calibration parameters. The achieved precision amounted to 0.03 pixel, corresponding to 0.8 um in the image plane.

    @InProceedings{forstner1988precision,
    title = {The Precision of a Digital Camera},
    author = {F\"orstner, Wolfgang and Vosselmann, George},
    booktitle = {ISPRS 16th Congress},
    year = {1988},
    address = {Kyoto},
    pages = {148--157},
    abstract = {A testfield containing a large number of black targets on a white background has been recorded by a digital camera from many points of view. In each digital image, the targets have been located using elementary image processing techniques. Precise coordinates were obtained by matching the targets with artificial masks. The precision of these coordinates was calculated in a bundle block adjustment with self-calibration parameters. The achieved precision amounted to 0.03 pixel, corresponding to 0.8 um in the image plane.},
    city = {Bonn},
    proceeding = {ISPRS 16th Congress},
    internal-note = {NOTE(review): co-author spelling "Vosselmann" is likely "Vosselman" (George Vosselman) -- verify against the original proceedings before changing.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1988Precision.pdf},
    }

1987

  • W. Förstner, “Reliability Analysis of Parameter Estimation in Linear Models with Applications to Mensuration Problems in Computer Vision,” in CVGIP – Computer Vision, Graphics, and Image Processing, 1987, p. 273–310. doi:10.1016/S0734-189X(87)80144-5
    [BibTeX] [PDF]

    The analysis of a mensuration problem aims at an evaluation of the suitability of the design of the measuring process for a specific task and at an assessment of the actually obtained measurements and of their influence onto the result. The concept of quality control, as it has been developed by the Netherlands geodesist W. Baarda is outlined. This theory provides objective quality measures, which take the geometry of the design and the used estimation and testing procedure into account: The evaluation of the design is based on measures for the precision, the controllability, and the robustness, which themselves can be used for planning purposes. The evaluation of the data is based on a statistical test, the estimated size of possible blunders and on the influence of the observed values onto the result. Three examples, namely template matching and absolute and relative orientation of cameras, demonstrate that the measures make intuitive evaluation precise and that they seem to be suitable for automatic quality control of mensuration problems encountered in computer vision.

    @Article{forstner1987reliability,
    title = {Reliability Analysis of Parameter Estimation in Linear Models with Applications to Mensuration Problems in Computer Vision},
    author = {F\"orstner, Wolfgang},
    journal = {CVGIP - Computer Vision, Graphics, and Image Processing},
    year = {1987},
    volume = {40},
    number = {3},
    pages = {273--310},
    abstract = {The analysis of a mensuration problem aims at an evaluation of the suitability of the design of the measuring process for a specific task and at an assessment of the actually obtained measurements and of their influence onto the result. The concept of quality control, as it has been developed by the Netherlands geodesist W. Baarda is outlined. This theory provides objective quality measures, which take the geometry of the design and the used estimation and testing procedure into account: The evaluation of the design is based on measures for the precision, the controllability, and the robustness, which themselves can be used for planning purposes. The evaluation of the data is based on a statistical test, the estimated size of possible blunders and on the influence of the observed values onto the result. Three examples, namely template matching and absolute and relative orientation of cameras, demonstrate that the measures make intuitive evaluation precise and that they seem to be suitable for automatic quality control of mensuration problems encountered in computer vision.},
    city = {Bonn},
    doi = {10.1016/S0734-189X(87)80144-5},
    proceeding = {CVGIP - Computer Vision, Graphics, and Image Processing},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1987Reliability.pdf},
    }

  • W. Förstner and E. Gülch, “A Fast Operator for Detection and Precise Location of Distinct Points, Corners and Centres of Circular Features,” in Proc. of the ISPRS Conf. on Fast Processing of Photogrammetric Data, Interlaken, 1987, p. 281–305.
    [BibTeX] [PDF]

    Feature extraction is a basic step for image matching and image analysis. The paper describes a fast operator for the detection and precise location of distinct points, corners and centres of circular image features. Distinct points are needed for feature based image matching or for tracking in image sequences. A special class of these distinct points are corners, which, beside edges, are the basic element for the analysis of polyhedra. Finally centres of circular features cover small targeted points and holes, disks or rings, which play an important role in one-dimensional image analysis. The extraction consists of two steps: window selection and feature location. The speed of the non-iterative operator results from parallelism on the arithmetic as well on the process level. Specifically the operator can be split into arithmetic operations on and between images, convolutions, partly with boxfilters, and finally vector and matrix operations. The operator provides a measure for the precision of the location.

    @InProceedings{forstner1987fast,
    title = {A Fast Operator for Detection and Precise Location of Distinct Points, Corners and Centres of Circular Features},
    author = {F\"orstner, Wolfgang and G\"ulch, Eberhard},
    booktitle = {Proc. of the ISPRS Conf. on Fast Processing of Photogrammetric Data},
    year = {1987},
    address = {Interlaken},
    pages = {281--305},
    abstract = {Feature extraction is a basic step for image matching and image analysis. The paper describes a fast operator for the detection and precise location of distinct points, corners and centres of circular image features. Distinct points are needed for feature based image matching or for tracking in image sequences. A special class of these distinct points are corners, which, beside edges, are the basic element for the analysis of polyhedra. Finally centres of circular features cover small targeted points and holes, disks or rings, which play an important role in one-dimensional image analysis. The extraction consists of two steps: window selection and feature location. The speed of the non-iterative operator results from parallelism on the arithmetic as well on the process level. Specifically the operator can be split into arithmetic operations on and between images, convolutions, partly with boxfilters, and finally vector and matrix operations. The operator provides a measure for the precision of the location.},
    city = {Bonn},
    proceeding = {Proc. of the ISPRS Conf. on Fast Processing of Photogrammetric Data},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1987Fast.pdf},
    }

1986

  • W. Förstner, “Abbildungen zu “A feature based correspondence algorithm for image matching”,” in ISP Comm. III., Rovaniemi, 1986.
    [BibTeX] [PDF]
    [none]
    @InProceedings{forstner1986abbildungen,
    title = {Abbildungen zu ``A feature based correspondence algorithm for image matching''},
    author = {F\"orstner, Wolfgang},
    booktitle = {ISP Comm. III},
    year = {1986},
    address = {Rovaniemi},
    abstract = {[none]},
    city = {Bonn},
    proceeding = {ISP Comm. III},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1986Abbildungen.pdf},
    }

  • W. Förstner, “A feature based correspondence algorithm for image matching,” in ISP Comm. III, Rovaniemi, 1986.
    [BibTeX] [PDF]

    A new feature based correspondence algorithm for image matching is presented. The interest operator is optimal for selecting points which promise high matching accuracy, for selecting corners with arbitrary number and orientation of edges or centres of discs, circles or rings. The similarily measure can take the seldomness of the selected points into account. The consistency of the solution is achieved by maximum likelihood type (robust) estimation for the parameters of an object model. Approximate values have to be better than 1/3 of the size of the image in shift, 20 degrees in rotation and 30 % in scale.

    @InProceedings{forstner1986feature,
    title = {A feature based correspondence algorithm for image matching},
    author = {F\"orstner, Wolfgang},
    booktitle = {ISP Comm. III},
    year = {1986},
    address = {Rovaniemi},
    abstract = {A new feature based correspondence algorithm for image matching is presented. The interest operator is optimal for selecting points which promise high matching accuracy, for selecting corners with arbitrary number and orientation of edges or centres of discs, circles or rings. The similarily measure can take the seldomness of the selected points into account. The consistency of the solution is achieved by maximum likelihood type (robust) estimation for the parameters of an object model. Approximate values have to be better than 1/3 of the size of the image in shift, 20 degrees in rotation and 30 \% in scale.},
    city = {Bonn},
    proceeding = {ISP Comm. III},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1986feature.pdf},
    }

  • W. Förstner, “Text zu “A feature based correspondence algorithm for image matching”,” in ISP Comm. III, Rovaniemi, 1986.
    [BibTeX] [PDF]

    A new feature based correspondence algorithm for image matching is presented. The interest operator is optimal for selecting points which promise high matching accuracy, for selecting corners with arbitrary number and orientation of edges or centres of discs, circles or rings. The similarily measure can take the seldomness of the selected points into account. The consistency of the solution is achieved by maximum likelihood type (robust) estimation for the parameters of an object model. Approximate values have to be better than 1/3 of the size of the image in shift, 20 degrees in rotation and 30 % in scale.

    @InProceedings{forstner1986text,
    title = {Text zu ``A feature based correspondence algorithm for image matching''},
    author = {F\"orstner, Wolfgang},
    booktitle = {ISP Comm. III},
    year = {1986},
    address = {Rovaniemi},
    abstract = {A new feature based correspondence algorithm for image matching is presented. The interest operator is optimal for selecting points which promise high matching accuracy, for selecting corners with arbitrary number and orientation of edges or centres of discs, circles or rings. The similarily measure can take the seldomness of the selected points into account. The consistency of the solution is achieved by maximum likelihood type (robust) estimation for the parameters of an object model. Approximate values have to be better than 1/3 of the size of the image in shift, 20 degrees in rotation and 30 \% in scale.},
    city = {Bonn},
    proceeding = {ISP Comm. III},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1986Text.pdf},
    }

1985

  • W. Förstner, “Determination of the Additive Noise Variance in Observed Autoregressive Processes using Variance Component Estimation Technique,” in Statistics and Decision, Supplement Issue No. 2, München, 1985, p. 263–274.
    [BibTeX] [PDF]

    The paper discusses the determination of the variances sigma_e^2 und sigma_n^2 in an observed autoregressive process y_i=x_i+n_i, with x_i=sum(a_k x_i-k + e_i). It is shown, that approximating the estimated Fourier power spectrum P_y(u) by least squares fit E(P_y(u))=|H(u)|^2 sigma_e^2 + sigma_n^2 is identical and numerical properties of the procedure are analysed showing the versatility of approach.

    @inproceedings{forstner1985determination,
    author     = {F\"orstner, Wolfgang},
    title      = {Determination of the Additive Noise Variance in Observed Autoregressive Processes using Variance Component Estimation Technique},
    booktitle  = {Statistics and Decision, Supplement Issue No. 2},
    address    = {M\"unchen},
    pages      = {263--274},
    year       = {1985},
    url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1985Determination.pdf},
    abstract   = {The paper discusses the determination of the variances sigma_e^2 und sigma_n^2 in an observed autoregressive process y_i=x_i+n_i, with x_i=sum(a_k x_i-k + e_i). It is shown, that approximating the estimated Fourier power spectrum P_y(u) by least squares fit E(P_y(u))=|H(u)|^2 sigma_e^2 + sigma_n^2 is identical and numerical properties of the procedure are analysed showing the versatility of approach.},
    city       = {Bonn},
    proceeding = {Statistics and Decision, Supplement Issue No. 2},
    }

  • W. Förstner, “High Quality Photogrammetric Point Determination,” Allgemeine Vermessungsnachrichten, vol. International Edition 2, p. 32–41, 1985.
    [BibTeX] [PDF]

    \textbf{Summary} Photogrammetric blocktriangulation is a versatile tool for high quality point determination. The paper outlines the precision and reliability features of the method. Examples of controlled tests prove that accuracies of 3-5 ppm can be achieved on standard equipment and that proper planning guarantees results which are robust with respect to gross and systematic errors. Digital image correlation techniques will further increase the economy and the flexibility of the procedure. \textbf{Zusammenfassung} Die photogrammetrische Blocktriangulation ist ein vielseitiges Instrument zur genauen Punktbestimmung. Der Beitrag zeigt die Genauigkeit und die Zuverlässigkeit, die dieses Verfahren kennzeichnen, auf. Die Beispiele mit kontrollierten Tests beweisen, dass mit normalen Instrumentarium Genauigkeiten von 3-5 ppm erreichbar sind, und dass eine gute Planung Ergebnisse liefert, die robust gegenüber groben und systematischen Fehlern sind. Techniken zur digitalen Zuordnung und Korrelation von Bildern werden die Anpassungsfähigkeit und die Wirtschaftlichkeit dieses Verfahrens noch steigern.

    @Article{forstner1985high,
    title = {High Quality Photogrammetric Point Determination},
    author = {F\"orstner, Wolfgang},
    journal = {Allgemeine Vermessungsnachrichten},
    year = {1985},
    pages = {32--41},
    volume = {International Edition 2},
    abstract = {\textbf{Summary} Photogrammetric blocktriangulation is a versatile tool for high quality point determination. The paper outlines the precision and reliability features of the method. Examples of controlled tests prove that accuracies of 3-5 ppm can be achieved on standard equipment and that proper planning guarantees results which are robust with respect to gross and systematic errors. Digital image correlation techniques will further increase the economy and the flexibility of the procedure. \textbf{Zusammenfassung} Die photogrammetrische Blocktriangulation ist ein vielseitiges Instrument zur genauen Punktbestimmung. Der Beitrag zeigt die Genauigkeit und die Zuverl\"assigkeit, die dieses Verfahren kennzeichnen, auf. Die Beispiele mit kontrollierten Tests beweisen, dass mit normalen Instrumentarium Genauigkeiten von 3-5 ppm erreichbar sind, und dass eine gute Planung Ergebnisse liefert, die robust gegen\"uber groben und systematischen Fehlern sind. Techniken zur digitalen Zuordnung und Korrelation von Bildern werden die Anpassungsf\"ahigkeit und die Wirtschaftlichkeit dieses Verfahrens noch steigern.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1985High.pdf},
    }

  • W. Förstner, “Prinzip und Leistungsfähigkeit der Korrelation und der Zuordnung digitaler Bilder,” in Photogrammetrische Woche, Stuttgart, 1985.
    [BibTeX] [PDF]
    [none]
    @inproceedings{forstner1985prinzip,
    author     = {F\"orstner, Wolfgang},
    title      = {Prinzip und Leistungsf\"ahigkeit der Korrelation und der Zuordnung digitaler Bilder},
    booktitle  = {Photogrammetrische Woche},
    address    = {Stuttgart},
    year       = {1985},
    url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1985Prinzip.pdf},
    abstract   = {[none]},
    city       = {Bonn},
    proceeding = {Photogrammetrische Woche 1985},
    }

1984

  • W. Förstner, “Quality Assessment of Object Location and Point Transfer Using Digital Image Correlation Techniques,” in International Archives of Photogrammetry, Rio de Janeiro, 1984.
    [BibTeX] [PDF]

    The paper discusses aspects of evaluating the results of digital correlation used in photogrammetric high precision application. The most common correlation techniques are compared with respect to their optimization criteria. Results from practical and theoretical investigations concerning the sensitivity of the methods with respect to deviations of the mathematical model from reality are given. The aim of the paper is to provide some insight into the dependency of the main parameters of digital image correlation on the image texture, e.g. the pixel and the patch size, the quality of approximate values, the influence of unmodeled geometric distortions or of correlated noise. The results are useful for increasing the adaptability of the methods.

    @InProceedings{forstner1984quality,
    title = {Quality Assessment of Object Location and Point Transfer Using Digital Image Correlation Techniques},
    author = {F\"orstner, Wolfgang},
    booktitle = {International Archives of Photogrammetry},
    year = {1984},
    address = {Rio de Janeiro},
    abstract = {The paper discusses aspects of evaluating the results of digital correlation used in photogrammetric high precision application. The most common correlation techniques are compared with respect to their optimization criteria. Results from practical and theoretical investigations concerning the sensitivity of the methods with respect to deviations of the mathematical model from reality are given. The aim of the paper is to provide some insight into the dependency of the main parameters of digital image correlation on the image texture, e.g. the pixel and the patch size, the quality of approximate values, the influence of unmodeled geometric distortions or of correlated noise. The results are useful for increasing the adaptability of the methods.},
    city = {Bonn},
    proceeding = {International Archives of Photogrammetry},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1984Quality.pdf},
    }

  • F. C. Paderes, E. M. Mikhail, and W. Förstner, “Rectification of Single and Multiple Frames of Satellite Scanner Imagery Using Points and Edges as Control,” in Proc. of the 2nd Annual NASA Symposium on Mathematical Pattern Recognition & Image Analysis, College Station, TX 77843, 1984, p. 92.
    [BibTeX] [PDF]

    Rectification of single and overlapping multiple scanner frames is carried out using a newly developed comprehensive parametric model. Tests with both simulated and real image data have proven, that this model in general is superior to the widely used polynomial model; and that the simultaneous rectification of overlapping frames using least squares techniques yields a higher accuracy than single frame rectification due to the inclusion of tie points between the image frames. Used as control, edges or lines, which are much more likely to be found in images, can replace conventional control points and can easily be implemented into the least squares approach. An efficient algorithm for finding corresponding points in image pairs has been developed which can be used for determining tie points between image frames and thus increase the economy of the whole rectification procedure.

    @InProceedings{paderes*84:rectification,
    author = {Paderes, F. C. and Mikhail, E. M. and F{\"o}rstner, W.},
    title = {{Rectification of Single and Multiple Frames of Satellite Scanner Imagery Using Points and Edges as Control}},
    booktitle = {Proc. of the 2nd Annual NASA Symposium on Mathematical Pattern Recognition \& Image Analysis},
    year = {1984},
    editor = {Guseman, L. F.},
    organization = {NASA Johnson Space Center},
    publisher = {Texas A \& M University},
    month = jul,
    pages = {92},
    address = {College Station, TX 77843},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1984Rectification.pdf},
    abstract = {Rectification of single and overlapping multiple scanner frames is carried out using a newly developed comprehensive parametric model. Tests with both simulated and real image data have proven, that this model in general is superior to the widely used polynomial model; and that the simultaneous rectification of overlapping frames using least squares techniques yields a higher accuracy than single frame rectification due to the inclusion of tie points between the image frames. Used as control, edges or lines, which are much more likely to be found in images, can replace conventional control points and can easily be implemented into the least squares approach. An efficient algorithm for finding corresponding points in image pairs has been developed which can be used for determining tie points between image frames and thus increase the economy of the whole rectification procedure.},
    }

1983

  • W. Förstner, “On the Morphological Quality of Digital Elevation Models,” in Proc. of the ISPRS Comm. III/WG 3 International Colloquium on Mathematical Aspects of Digital Elevation Models, Photogrammetric Data Acquisition Terrain Modelling, Accuracy, Stockholm, 1983, p. 6.1–6.18.
    [BibTeX] [PDF]

    The paper discusses the morphological quality of digital elevation models (DEM). Quality is understood as the precision and the reliability of the height, the slope and the curvature at interpolated points. Whereas precision is described by the standard deviation, reliability – according to Baarda – describes the effect of incorrect heights or incorrect assumptions about the type of the terrain onto the interpolated DEM. First the influence of the sampling intervall onto the representation of the morphology of profiles with different spectra is discussed. It is shown that the sampling intervall leading to a preset relative height fidelity is not sufficient to reach an acceptable representation of the slope or even the curvature of the terrain, provided all frequencies are of equal interest. Therefore the effect of additional form measurements (slopes and curvatures) onto the quality of the interpolated DEM is investigated. Using the method of finite elements it is shown, that additional measurements of slopes lead to an increase of precision and reliability of appr. a factor 1.4, thus the maximum influence of nondetectable errors is decreased by factor 2. It is shown that in addition to the power spectrum the distribution of the modelling stochastic process is decisive for the average sampling density, at the same time suggesting to sample the terrain by data compression using form elements.

    @inproceedings{forstner1983morphological,
    author     = {F\"orstner, Wolfgang},
    title      = {On the Morphological Quality of Digital Elevation Models},
    booktitle  = {Proc. of the ISPRS Comm. III/WG 3 International Colloquium on Mathematical Aspects of Digital Elevation Models, Photogrammetric Data Acquisition Terrain Modelling, Accuracy},
    address    = {Stockholm},
    pages      = {6.1--6.18},
    year       = {1983},
    url        = {https://www.ipb.uni-bonn.de/pdfs/Forstner1983Morphological.pdf},
    abstract   = {The paper discusses the morphological quality of digital elevation models (DEM). Quality is understood as the precision and the reliability of the height, the slope and the curvature at interpolated points. Whereas precision is described by the standard deviation, reliability - according to Baarda - describes the effect of incorrect heights or incorrect assumptions about the type of the terrain onto the interpolated DEM. First the influence of the sampling intervall onto the representation of the morphology of profiles with different spectra is discussed. It is shown that the sampling intervall leading to a preset relative height fidelity is not sufficient to reach an acceptable representation of the slope or even the curvature of the terrain, provided all frequencies are of equal interest. Therefore the effect of additional form measurements (slopes and curvatures) onto the quality of the interpolated DEM is investigated. Using the method of finite elements it is shown, that additional measurements of slopes lead to an increase of precision and reliability of appr. a factor 1.4, thus the maximum influence of nondetectable errors is decreased by factor 2. It is shown that in addition to the power spectrum the distribution of the modelling stochastic process is decisive for the average sampling density, at the same time suggesting to sample the terrain by data compression using form elements.},
    city       = {Bonn},
    proceeding = {Proc. of the ISPRS Comm. III/WG 3 International Colloquium on Mathematical Aspects of Digital Elevation Models, Photogrammetric Data Acquisition Terrain Modelling, Accuracy},
    }

1982

  • W. Förstner, “On the Geometric Precision of Digital Correlation,” in Proc. of the ISPRS Symposium Mathematical Models, Accuracy Aspects and Quality Control, Finland, 1982, p. 176–189.
    [BibTeX] [PDF]

    The geometric precision of digital correlation can be described by the standard deviation of the estimated shift. The paper shows how the precision depends on the signal to noise ratio, the number of pixels involved and the texture of the object and discusses the choice of a low pass filter which minimizes the variance of the estimated location in order to obtain an optimal sampling frequency.

    @InProceedings{forstner1982geometric,
    title = {On the Geometric Precision of Digital Correlation},
    author = {F\"orstner, Wolfgang},
    booktitle = {Proc. of the ISPRS Symposium Mathematical Models, Accuracy Aspects and Quality Control},
    year = {1982},
    address = {Finland},
    pages = {176--189},
    abstract = {The geometric precision of digital correlation can be described by the standard deviation of the estimated shift. The paper shows how the precision depends on the signal to noise ratio, the number of pixels involved and the texture of the object and discusses the choice of a low pass filter which minimizes the variance of the estimated location in order to obtain an optimal sampling frequency.},
    city = {Bonn},
    proceeding = {Proc. of the ISPRS Symposium Mathematical Models, Accuracy Aspects and Quality Control},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1982Geometric.pdf},
    }

  • W. Förstner, “Systematic Errors in Photogrammetric Point Determination,” in Proc. Survey Control Networks, International Federation of Surveyors (FIG), Meeting Study Group 5B, Denmark, 1982, p. 197–209.
    [BibTeX] [PDF]

    The refinement of the functional model used for photogrammetric point determination has led to a significant increase of the accuracy, being about 3-8 $\mu$m at photoscale. It is discussed how the functional or the stochastical model may be further refined to compensate for varying, systematic effects and for local distortions which are caused by time-dependent changes of the flight and measuring conditions.

    @InProceedings{forstner1982systematic,
    title = {Systematic Errors in Photogrammetric Point Determination},
    author = {F\"orstner, Wolfgang},
    booktitle = {Proc. Survey Control Networks, International Federation of Surveyors (FIG), Meeting Study Group 5B},
    year = {1982},
    address = {Denmark},
    pages = {197--209},
    abstract = {The refinement of the functional model used for photogrammetric point determination has led to a significant increase of the accuracy, being about 3-8 $\mu$m at photoscale. It is discussed how the functional or the stochastical model may be further refined to compensate for varying, systematic effects and for local distortions which are caused by time-dependent changes of the flight and measuring conditions.},
    city = {Bonn},
    proceeding = {Proc. Survey Control Networks, International Federation of Surveyors (FIG), Meeting Study Group 5B},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1982Systematic.pdf},
    }

1981

  • W. Förstner, “Reliability and Discernability of Extended Gauss-Markov Models,” in Proc. of the International Symposium on Geodetic Networks and Computations, 1981.
    [BibTeX] [PDF]

    none

    @InProceedings{forstner1981reliability,
    title = {Reliability and Discernability of Extended Gauss-Markov Models},
    author = {F\"orstner, Wolfgang},
    booktitle = {Proc. of the International Symposium on Geodetic Networks and Computations},
    year = {1981},
    number = {258},
    publisher = {Deutsche Geod{\"a}tische Kommission, Reihe B},
    abstract = {none},
    timestamp = {2013.03.19},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1981Reliability.pdf},
    }

  • W. Förstner and R. Schroth, “On the Estimation of Covariance Matrices for Photogrammetric Image Coordinates,” in Proc. of the International Symposium on Geodetic Networks and Computations, 1981.
    [BibTeX] [PDF]

    none

    @InProceedings{forstner1981estimation,
    title = {On the Estimation of Covariance Matrices for Photogrammetric Image Coordinates},
    author = {F\"orstner, Wolfgang and Schroth, Ralf},
    booktitle = {Proc. of the International Symposium on Geodetic Networks and Computations},
    year = {1981},
    number = {258},
    publisher = {Deutsche Geod{\"a}tische Kommission, Reihe B},
    abstract = {none},
    timestamp = {2013.03.19},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1982Estimation.pdf},
    }

1979

  • W. Förstner, “Das Programm TRINA zur Ausgleichung und Gütebeurteilung geodätischer Lagenetze,” ZfV – Zeitschrift für Vermessungswesen, iss. 2, p. 61–72, 1979.
    [BibTeX] [PDF]

    The article describes a new computerprogram (TRINA) for the trigonometric net adjustment. The program (FORTRAN IV) was written by the author at the Landesvermessungsamt Nordrhein-Westfalen. It serves for estimating the reliability of horizontal geodetic nets based on the theory of BAARDA. The program includes a statistical test (“data-snooping”) for the detection of gross errors in observations as well as in given coordinates. It also offers a possibility of estimating the weights of the observations (a posteriori variance estimation). An example illustrates how the program finds out the weak parts of the nets and saves the comparison of the results with the net diagram.

    @article{forstner1979das,
    author   = {F\"orstner, Wolfgang},
    title    = {Das Programm {TRINA} zur Ausgleichung und G\"utebeurteilung geod\"atischer Lagenetze},
    journal  = {ZfV - Zeitschrift f\"ur Vermessungswesen},
    number   = {2},
    pages    = {61--72},
    year     = {1979},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Forstner1979Das.pdf},
    abstract = {The article describes a new computerprogram (TRINA) for the trigonometric net adjustment. The program (FORTRAN IV) was written by the author at the Landesvermessungsamt Nordrhein-Westfalen. It serves for estimating the reliability of horizontal geodetic nets based on the theory of BAARDA. The program includes a statistical test ("data-snooping") for the detection of gross errors in observations as well as in given coordinates. It also offers a possibility of estimating the weights of the observations (a posteriori variance estimation). An example illustrates how the program finds out the weak parts of the nets and saves the comparison of the results with the net diagram.},
    }

  • W. Förstner, “Ein Verfahren zur Schätzung von Varianz- und Kovarianzkomponenten,” Allgemeine Vermessungsnachrichten, vol. Heft 11-12, p. 446–453, 1979.
    [BibTeX] [PDF]
    [none]
    @article{forstner1979ein,
    author   = {F\"orstner, Wolfgang},
    title    = {Ein Verfahren zur Sch\"atzung von Varianz- und Kovarianzkomponenten},
    journal  = {Allgemeine Vermessungsnachrichten},
    volume   = {Heft 11-12},
    pages    = {446--453},
    year     = {1979},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Forstner1979Ein.pdf},
    abstract = {[none]},
    }

  • A. Rosenfeld, “Digital Topology,” American Mathematical Monthly, vol. 86, p. 621–630, 1979.
    [BibTeX] [PDF]

    Digital pictures are rectangular arrays of nonnegative numbers. The analysis of a digital picture usually involves “segmenting” it into parts and measuring various properties of and relationships among the parts. In particular, one often wants to separate out the connected components of a picture subset to determine the adjacency relationships among those components, to track and encode their borders, or to “thin” them down to “skeletons” that have no interiors, without changing their connectedness properties. There are standard algorithms for doing all of these tasks; but to prove that they work, one needs to establish some basic topological properties of digital picture subsets. This paper provides an introduction to the study of such properties, which we call digital topology.

    @article{rosenfeld1979digital,
    author   = {Rosenfeld, Azriel},
    title    = {Digital Topology},
    journal  = {American Mathematical Monthly},
    volume   = {86},
    pages    = {621--630},
    year     = {1979},
    url      = {https://www.ipb.uni-bonn.de/pdfs/Rosenfeld1979Digital.pdf},
    abstract = {Digital pictures are rectangular arrays of nonnegative numbers. The analysis of a digital picture usually involves "segmenting" it into parts and measuring various properties of and relationships among the parts. In particular, one often wants to separate out the connected components of a picture subset to determine the adjacency relationships among those components, to track and encode their borders, or to "thin" them down to "skeletons" that have no interiors, without changing their connectedness properties. There are standard algorithms for doing all of these tasks; but to prove that they work, one needs to establish some basic topological properties of digital picture subsets. This paper provides an introduction to the study of such properties, which we call digital topology.},
    }

1978

  • W. Förstner, “Die Suche nach groben Fehlern in photogrammetrischen Lageblöcken,” PhD Thesis, 1978.
    [BibTeX] [PDF]

    In der vorliegenden Arbeit werden die Voraussetzungen und Möglichkeiten der automatisierten Suche grober Fehler in photogrammetrischen Lageblöcken untersucht. Mit Hilfe statistischer Methoden wird nachgewiesen, daß sich eine hohe Zuverlässigkeit photogrammetrisch bestimmter Koordinaten mit nur geringem zusätzlichen Meßaufwand erreichen läßt. Gegenüber herkömmlichen Tests ermöglicht die Verwendung statistisch fundierter Testverfahren dabei nicht nur die Lokalisierung wesentlich kleinerer grober Fehler, sondern auch die sichere Erfassung großer grober Fehler. Für die Anregung zu dieser Arbeit und die wertvollen Hinweise möchte ich Herrn Prof. Dr. – Ing. F. Ackermann vielmals danken. Auch bin ich Herrn Prof. Dr. – Ing. G. Kupfer dafür dankbar, dass er mir die Rechenzeit am Rechenzentrum der Universität Bonn zur Verfügung stellte.

    @phdthesis{forstner1978die,
    title = {Die Suche nach groben Fehlern in photogrammetrischen Lagebl\"ocken},
    author = {F\"orstner, Wolfgang},
    school = {Institut f\"ur Photogrammetrie, Universit\"at Stuttgart},
    year = {1978},
    abstract = {In der vorliegenden Arbeit werden die Voraussetzungen und M\"oglichkeiten der automatisierten Suche grober Fehler in photogrammetrischen Lagebl\"ocken untersucht. Mit Hilfe statistischer Methoden wird nachgewiesen, da{\ss} sich eine hohe Zuverl\"assigkeit photogrammetrisch bestimmter Koordinaten mit nur geringem zus\"atzlichen Me{\ss}aufwand erreichen l\"a{\ss}t. Gegen\"uber herk\"ommlichen Tests erm\"oglicht die Verwendung statistisch fundierter Testverfahren dabei nicht nur die Lokalisierung wesentlich kleinerer grober Fehler, sondern auch die sichere Erfassung gro{\ss}er grober Fehler. F\"ur die Anregung zu dieser Arbeit und die wertvollen Hinweise m\"ochte ich Herrn Prof. Dr. - Ing. F. Ackermann vielmals danken. Auch bin ich Herrn Prof. Dr. - Ing. G. Kupfer daf\"ur dankbar, dass er mir die Rechenzeit am Rechenzentrum der Universit\"at Bonn zur Verf\"ugung stellte.},
    url = {https://www.ipb.uni-bonn.de/pdfs/Forstner1978Die.pdf},
    }

1972

  • W. Förstner, “Photogrammetrische Punktbestimmung aus extrem großmaßstäbigen Bildern – Der Versuch Böhmenkirch,” Allgemeine Vermessungsnachrichten, iss. 7, pp. 271–281, 1972.
    [BibTeX] [PDF]
    [none]
    @article{foerstner1972photogrammetrische,
    title = {Photogrammetrische Punktbestimmung aus extrem gro{\ss}ma{\ss}st{\"a}bigen Bildern - Der Versuch B\"ohmenkirch},
    author = {F\"orstner, Wolfgang},
    journal = {Allgemeine Vermessungsnachrichten},
    year = {1972},
    pages = {271--281},
    number = {7},
    url = {https://www.ipb.uni-bonn.de/pdfs/foerstner72photogrammetrische.pdf},
    }