Publications from 2001

2024
Wolski, K., Djeacoumar, A., Javanmardi, A., et al. 2024. Learning Images Across Scales Using Adversarial Training. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2024) 43, 4.
Export
BibTeX
@article{Wolski24,
  title     = {Learning Images Across Scales Using Adversarial Training},
  author    = {Wolski, Krzysztof and Djeacoumar, Adarsh and Javanmardi, Alireza and Seidel, Hans-Peter and Theobalt, Christian and Cordonnier, Guillaume and Myszkowski, Karol and Drettakis, George and Pan, Xingang and Leimk{\"u}hler, Thomas},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3658190},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2024},
  date      = {2024},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {43},
  number    = {4},
  pages     = {1--13},
  eid       = {131},
  booktitle = {Proceedings of the SIGGRAPH Conference (ACM SIGGRAPH 2024)},
}
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Djeacoumar, Adarsh %A Javanmardi, Alireza %A Seidel, Hans-Peter %A Theobalt, Christian %A Cordonnier, Guillaume %A Myszkowski, Karol %A Drettakis, George %A Pan, Xingang %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Images Across Scales Using Adversarial Training : %G eng %U http://hdl.handle.net/21.11116/0000-000F-EED1-9 %R 10.1145/3658190 %7 2024-07-19 %D 2024 %J ACM Transactions on Graphics %V 43 %N 4 %& 1 %P 1 - 13 %Z sequence number: 131 %I ACM %C New York, NY %@ false %B Proceedings of the SIGGRAPH Conference %O ACM SIGGRAPH 2024 Denver, CO, USA, July 28 - Aug 1
Wang, C., Wolski, K., Kerbl, B., et al. 2024. Cinematic Gaussians: Real-Time HDR Radiance Fields with Depth. Computer Graphics Forum (Proc. Pacific Graphics 2024) 43, 7.
Export
BibTeX
@article{WangPG24,
  title     = {Cinematic {G}aussians: {R}eal-Time {HDR} Radiance Fields with Depth},
  author    = {Wang, Chao and Wolski, Krzysztof and Kerbl, Bernhard and Serrano, Ana and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.15214},
  publisher = {Blackwell-Wiley},
  address   = {Oxford},
  year      = {2024},
  date      = {2024},
  journal   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  volume    = {43},
  number    = {7},
  pages     = {1--13},
  booktitle = {The 32nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2024)},
  editor    = {Chen, Renjie and Ritschel, Tobias and Whiting, Emily},
}
Endnote
%0 Journal Article %A Wang, Chao %A Wolski, Krzysztof %A Kerbl, Bernhard %A Serrano, Ana %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Cinematic Gaussians: Real-Time HDR Radiance Fields with Depth : %G eng %U http://hdl.handle.net/21.11116/0000-000F-FC3A-5 %R 10.1111/cgf.15214 %7 2024 %D 2024 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 43 %N 7 %& 1 %P 1 - 13 %I Blackwell-Wiley %C Oxford %@ false %B The 32nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2024 PG 2024 Huangshan (Yellow Mountain), China ; October 13 – 16, 2024
Ruan, L., Bálint, M., Bemana, M., et al. 2024. Self-Supervised Video Defocus Deblurring with Atlas Learning. Proceedings SIGGRAPH 2024 Conference Papers, ACM.
Export
BibTeX
@inproceedings{Ruan_SIGGRAPH24,
  title     = {Self-Supervised Video Defocus Deblurring with Atlas Learning},
  author    = {Ruan, Lingyan and B{\'a}lint, Martin and Bemana, Mojtaba and Wolski, Krzysztof and Seidel, Hans-Peter and Myszkowski, Karol and Chen, Bin},
  language  = {eng},
  isbn      = {979-8-4007-0525-0},
  doi       = {10.1145/3641519.3657524},
  publisher = {ACM},
  year      = {2024},
  date      = {2024},
  booktitle = {Proceedings SIGGRAPH 2024 Conference Papers},
  editor    = {Burbano, Andres and Zorin, Denis and Jarosz, Wojciech},
  pages     = {1--11},
  eid       = {120},
  address   = {Denver, CO, USA},
}
Endnote
%0 Conference Proceedings %A Ruan, Lingyan %A Bálint, Martin %A Bemana, Mojtaba %A Wolski, Krzysztof %A Seidel, Hans-Peter %A Myszkowski, Karol %A Chen, Bin %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Self-Supervised Video Defocus Deblurring with Atlas Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000F-FC3F-0 %R 10.1145/3641519.3657524 %D 2024 %B ACM SIGGRAPH Conference %Z date of event: 2024-07-28 - 2024-08-01 %C Denver, CO, USA %B Proceedings SIGGRAPH 2024 Conference Papers %E Burbano, Andres; Zorin, Denis; Jarosz, Wojciech %P 1 - 11 %Z sequence number: 120 %I ACM %@ 979-8-4007-0525-0
Mujkanovic, F., Nsampi, N.E., Theobalt, C., Seidel, H.-P., and Leimkühler, T. 2024. Neural Gaussian Scale-Space Fields. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2024) 43, 4.
Export
BibTeX
@article{Mujkanovic24,
  title     = {Neural {G}aussian Scale-Space Fields},
  author    = {Mujkanovic, Felix and Nsampi, Ntumba Elie and Theobalt, Christian and Seidel, Hans-Peter and Leimk{\"u}hler, Thomas},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3658163},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2024},
  date      = {2024},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {43},
  number    = {4},
  pages     = {1--15},
  eid       = {134},
  booktitle = {Proceedings of the SIGGRAPH Conference (ACM SIGGRAPH 2024)},
}
Endnote
%0 Journal Article %A Mujkanovic, Felix %A Nsampi, Ntumba Elie %A Theobalt, Christian %A Seidel, Hans-Peter %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Neural Gaussian Scale-Space Fields : %G eng %U http://hdl.handle.net/21.11116/0000-000F-EE1D-6 %R 10.1145/3658163 %7 2024-07-19 %D 2024 %J ACM Transactions on Graphics %V 43 %N 4 %& 1 %P 1 - 15 %Z sequence number: 134 %I ACM %C New York, NY %@ false %B Proceedings of the SIGGRAPH Conference %O ACM SIGGRAPH 2024 Denver, CO, USA, July 28 - Aug 1
Jiménez Navarro, D., Peng, X., Zhang, Y., et al. 2024. Accelerating Saccadic Response through Spatial and Temporal Cross-Modal Misalignments. Proceedings SIGGRAPH 2024 Conference Papers, ACM.
Export
BibTeX
@inproceedings{Navarro_SIGGRAPH24,
  title     = {Accelerating Saccadic Response through Spatial and Temporal Cross-Modal Misalignments},
  author    = {Jim{\'e}nez Navarro, Daniel and Peng, Xi and Zhang, Yunxiang and Myszkowski, Karol and Seidel, Hans-Peter and Sun, Qi and Serrano, Ana},
  language  = {eng},
  isbn      = {979-8-4007-0525-0},
  doi       = {10.1145/3641519.3657432},
  publisher = {ACM},
  year      = {2024},
  date      = {2024},
  booktitle = {Proceedings SIGGRAPH 2024 Conference Papers},
  editor    = {Burbano, Andres and Zorin, Denis and Jarosz, Wojciech},
  pages     = {1--12},
  eid       = {129},
  address   = {Denver, CO, USA},
}
Endnote
%0 Conference Proceedings %A Jiménez Navarro, Daniel %A Peng, Xi %A Zhang, Yunxiang %A Myszkowski, Karol %A Seidel, Hans-Peter %A Sun, Qi %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Accelerating Saccadic Response through Spatial and Temporal Cross-Modal Misalignments : %G eng %U http://hdl.handle.net/21.11116/0000-000F-FC10-3 %R 10.1145/3641519.3657432 %D 2024 %B ACM SIGGRAPH Conference %Z date of event: 2024-07-28 - 2024-08-01 %C Denver, CO, USA %B Proceedings SIGGRAPH 2024 Conference Papers %E Burbano, Andres; Zorin, Denis; Jarosz, Wojciech %P 1 - 12 %Z sequence number: 129 %I ACM %@ 979-8-4007-0525-0
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2024. Enhancing Image Quality Prediction with Self-supervised Visual Masking. Computer Graphics Forum (Proc. EUROGRAPHICS 2024) 43, 2.
Export
BibTeX
@article{cogalan_Eurographics24,
  title     = {Enhancing Image Quality Prediction with Self-supervised Visual Masking},
  author    = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.15051},
  publisher = {Blackwell-Wiley},
  address   = {Oxford},
  year      = {2024},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {43},
  number    = {2},
  pages     = {1--12},
  eid       = {e15051},
  booktitle = {45th Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2024)},
  editor    = {Bermano, A. and Kalogerakis, E.},
}
Endnote
%0 Journal Article %A Çoğalan, Uğur %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Enhancing Image Quality Prediction with Self-supervised Visual Masking : %G eng %U http://hdl.handle.net/21.11116/0000-000F-4DAC-A %R 10.1111/cgf.15051 %7 2024 %D 2024 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 43 %N 2 %& 1 %P 1 - 12 %Z sequence number: e15051 %I Blackwell-Wiley %C Oxford %@ false %B 45th Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2024 EG 2024 Limassol, Cyprus, April 22-26 %U https://onlinelibrary.wiley.com/doi/epdf/10.1111/cgf.15051
Bemana, M., Leimkühler, T., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2024. Exposure Diffusion: HDR Image Generation by Consistent LDR denoising. https://arxiv.org/abs/2405.14304.
(arXiv: 2405.14304)
Abstract
We demonstrate generating high-dynamic range (HDR) images using the concerted<br>action of multiple black-box, pre-trained low-dynamic range (LDR) image<br>diffusion models. Common diffusion models are not HDR as, first, there is no<br>sufficiently large HDR image dataset available to re-train them, and second,<br>even if it was, re-training such models is impossible for most compute budgets.<br>Instead, we seek inspiration from the HDR image capture literature that<br>traditionally fuses sets of LDR images, called "brackets", to produce a single<br>HDR image. We operate multiple denoising processes to generate multiple LDR<br>brackets that together form a valid HDR result. To this end, we introduce an<br>exposure consistency term into the diffusion process to couple the brackets<br>such that they agree across the exposure range they share. We demonstrate HDR<br>versions of state-of-the-art unconditional and conditional as well as<br>restoration-type (LDR2HDR) generative modeling.<br>
Export
BibTeX
@online{Bemana_2405.14304,
  title      = {Exposure Diffusion: {HDR} Image Generation by Consistent {LDR} denoising},
  author     = {Bemana, Mojtaba and Leimk{\"u}hler, Thomas and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  language   = {eng},
  url        = {https://arxiv.org/abs/2405.14304},
  eprint     = {2405.14304},
  eprinttype = {arXiv},
  year       = {2024},
  abstract   = {We demonstrate generating high-dynamic range (HDR) images using the concerted action of multiple black-box, pre-trained low-dynamic range (LDR) image diffusion models. Common diffusion models are not HDR as, first, there is no sufficiently large HDR image dataset available to re-train them, and second, even if it was, re-training such models is impossible for most compute budgets. Instead, we seek inspiration from the HDR image capture literature that traditionally fuses sets of LDR images, called ``brackets'', to produce a single HDR image. We operate multiple denoising processes to generate multiple LDR brackets that together form a valid HDR result. To this end, we introduce an exposure consistency term into the diffusion process to couple the brackets such that they agree across the exposure range they share. We demonstrate HDR versions of state-of-the-art unconditional and conditional as well as restoration-type (LDR2HDR) generative modeling.},
}
Endnote
%0 Report %A Bemana, Mojtaba %A Leimk&#252;hler, Thomas %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Exposure Diffusion: HDR Image Generation by Consistent LDR denoising : %G eng %U http://hdl.handle.net/21.11116/0000-0010-1074-9 %U https://arxiv.org/abs/2405.14304 %D 2024 %X We demonstrate generating high-dynamic range (HDR) images using the concerted<br>action of multiple black-box, pre-trained low-dynamic range (LDR) image<br>diffusion models. Common diffusion models are not HDR as, first, there is no<br>sufficiently large HDR image dataset available to re-train them, and second,<br>even if it was, re-training such models is impossible for most compute budgets.<br>Instead, we seek inspiration from the HDR image capture literature that<br>traditionally fuses sets of LDR images, called "brackets", to produce a single<br>HDR image. We operate multiple denoising processes to generate multiple LDR<br>brackets that together form a valid HDR result. To this end, we introduce an<br>exposure consistency term into the diffusion process to couple the brackets<br>such that they agree across the exposure range they share. We demonstrate HDR<br>versions of state-of-the-art unconditional and conditional as well as<br>restoration-type (LDR2HDR) generative modeling.<br> %K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV
2023
Weinrauch, A., Seidel, H.-P., Mlakar, D., Steinberger, M., and Zayer, R. 2023. A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces. Computer Graphics Forum 42, 2.
Export
BibTeX
@article{Weinrauch_CGF23,
  title        = {A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and {Reeb} Graph Construction on Surfaces},
  author       = {Weinrauch, Alexander and Seidel, Hans-Peter and Mlakar, Daniel and Steinberger, Markus and Zayer, Rhaleb},
  language     = {eng},
  issn         = {0167-7055},
  doi          = {10.1111/cgf.14763},
  publisher    = {Blackwell-Wiley},
  address      = {Oxford},
  year         = {2023},
  marginalmark = {$\bullet$},
  journal      = {Computer Graphics Forum},
  volume       = {42},
  number       = {2},
  pages        = {309--320},
}
Endnote
%0 Journal Article %A Weinrauch, Alexander %A Seidel, Hans-Peter %A Mlakar, Daniel %A Steinberger, Markus %A Zayer, Rhaleb %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-000C-B851-9 %R 10.1111/cgf.14763 %7 2023 %D 2023 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 42 %N 2 %& 309 %P 309 - 320 %I Blackwell-Wiley %C Oxford %@ false
Wang, C., Serrano, A., Pan, X., et al. 2023a. An Implicit Neural Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2023) 42, 6.
Export
BibTeX
@article{Wang_SIGGRAPHASIA23,
  title        = {An Implicit Neural Representation for the Image Stack: {D}epth, All in Focus, and High Dynamic Range},
  author       = {Wang, Chao and Serrano, Ana and Pan, Xingang and Wolski, Krzysztof and Chen, Bin and Myszkowski, Karol and Seidel, Hans-Peter and Theobalt, Christian and Leimk{\"u}hler, Thomas},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3618367},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  journal      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume       = {42},
  number       = {6},
  pages        = {1--11},
  eid          = {221},
  booktitle    = {Proceedings of ACM SIGGRAPH Asia 2023},
}
Endnote
%0 Journal Article %A Wang, Chao %A Serrano, Ana %A Pan, Xingang %A Wolski, Krzysztof %A Chen, Bin %A Myszkowski, Karol %A Seidel, Hans-Peter %A Theobalt, Christian %A Leimk&#252;hler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T An Implicit Neural Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range : %G eng %U http://hdl.handle.net/21.11116/0000-000D-B80B-8 %R 10.1145/3618367 %7 2023 %D 2023 %J ACM Transactions on Graphics %V 42 %N 6 %& 1 %P 1 - 11 %Z sequence number: 221 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2023 %O ACM SIGGRAPH Asia 2023 Sydney, Australia, 12-15 December 2023 SA '23 SA 2023
Wang, C., Serrano, A., Pan, X., et al. 2023b. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. IEEE/CVF International Conference on Computer Vision (ICCV 2023), IEEE.
Export
BibTeX
@inproceedings{wang2023glowgan,
  title        = {{GlowGAN}: {U}nsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
  author       = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  language     = {eng},
  isbn         = {979-8-3503-0718-4},
  doi          = {10.1109/ICCV51070.2023.00964},
  publisher    = {IEEE},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  booktitle    = {IEEE/CVF International Conference on Computer Vision (ICCV 2023)},
  pages        = {10475--10485},
  address      = {Paris, France},
}
Endnote
%0 Conference Proceedings %A Wang, Chao %A Serrano, Ana %A Pan, Xingang %A Chen, Bin %A Seidel, Hans-Peter %A Theobalt, Christian %A Myszkowski, Karol %A Leimk&#252;hler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild : %G eng %U http://hdl.handle.net/21.11116/0000-000D-B7FC-9 %R 10.1109/ICCV51070.2023.00964 %D 2023 %B IEEE/CVF International Conference on Computer Vision %Z date of event: 2023-10-01 - 2023-10-06 %C Paris, France %B IEEE/CVF International Conference on Computer Vision %P 10475 - 10485 %I IEEE %@ 979-8-3503-0718-4
Ruan, L., Bemana, M., Seidel, H.-P., Myszkowski, K., and Chen, B. 2023. Revisiting Image Deblurring with an Efficient ConvNet. https://arxiv.org/abs/2302.02234.
(arXiv: 2302.02234)
Abstract
Image deblurring aims to recover the latent sharp image from its blurry<br>counterpart and has a wide range of applications in computer vision. The<br>Convolution Neural Networks (CNNs) have performed well in this domain for many<br>years, and until recently an alternative network architecture, namely<br>Transformer, has demonstrated even stronger performance. One can attribute its<br>superiority to the multi-head self-attention (MHSA) mechanism, which offers a<br>larger receptive field and better input content adaptability than CNNs.<br>However, as MHSA demands high computational costs that grow quadratically with<br>respect to the input resolution, it becomes impractical for high-resolution<br>image deblurring tasks. In this work, we propose a unified lightweight CNN<br>network that features a large effective receptive field (ERF) and demonstrates<br>comparable or even better performance than Transformers while bearing less<br>computational costs. Our key design is an efficient CNN block dubbed LaKD,<br>equipped with a large kernel depth-wise convolution and spatial-channel mixing<br>structure, attaining comparable or larger ERF than Transformers but with a<br>smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over<br>the state-of-the-art Restormer on defocus / motion deblurring benchmark<br>datasets with 32% fewer parameters and 39% fewer MACs. Extensive experiments<br>demonstrate the superior performance of our network and the effectiveness of<br>each module. Furthermore, we propose a compact and intuitive ERFMeter metric<br>that quantitatively characterizes ERF, and shows a high correlation to the<br>network performance. We hope this work can inspire the research community to<br>further explore the pros and cons of CNN and Transformer architectures beyond<br>image deblurring tasks.<br>
Export
BibTeX
@online{ruan2023revisiting,
  title        = {Revisiting Image Deblurring with an Efficient {ConvNet}},
  author       = {Ruan, Lingyan and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol and Chen, Bin},
  language     = {eng},
  url          = {https://arxiv.org/abs/2302.02234},
  eprint       = {2302.02234},
  eprinttype   = {arXiv},
  year         = {2023},
  marginalmark = {$\bullet$},
  abstract     = {Image deblurring aims to recover the latent sharp image from its blurry counterpart and has a wide range of applications in computer vision. The Convolution Neural Networks (CNNs) have performed well in this domain for many years, and until recently an alternative network architecture, namely Transformer, has demonstrated even stronger performance. One can attribute its superiority to the multi-head self-attention (MHSA) mechanism, which offers a larger receptive field and better input content adaptability than CNNs. However, as MHSA demands high computational costs that grow quadratically with respect to the input resolution, it becomes impractical for high-resolution image deblurring tasks. In this work, we propose a unified lightweight CNN network that features a large effective receptive field (ERF) and demonstrates comparable or even better performance than Transformers while bearing less computational costs. Our key design is an efficient CNN block dubbed LaKD, equipped with a large kernel depth-wise convolution and spatial-channel mixing structure, attaining comparable or larger ERF than Transformers but with a smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over the state-of-the-art Restormer on defocus / motion deblurring benchmark datasets with 32\% fewer parameters and 39\% fewer MACs. Extensive experiments demonstrate the superior performance of our network and the effectiveness of each module. Furthermore, we propose a compact and intuitive ERFMeter metric that quantitatively characterizes ERF, and shows a high correlation to the network performance. We hope this work can inspire the research community to further explore the pros and cons of CNN and Transformer architectures beyond image deblurring tasks.},
}
Endnote
%0 Report %A Ruan, Lingyan %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %A Chen, Bin %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Revisiting Image Deblurring with an Efficient ConvNet : %G eng %U http://hdl.handle.net/21.11116/0000-000C-C7B9-3 %U https://arxiv.org/abs/2302.02234 %D 2023 %X Image deblurring aims to recover the latent sharp image from its blurry<br>counterpart and has a wide range of applications in computer vision. The<br>Convolution Neural Networks (CNNs) have performed well in this domain for many<br>years, and until recently an alternative network architecture, namely<br>Transformer, has demonstrated even stronger performance. One can attribute its<br>superiority to the multi-head self-attention (MHSA) mechanism, which offers a<br>larger receptive field and better input content adaptability than CNNs.<br>However, as MHSA demands high computational costs that grow quadratically with<br>respect to the input resolution, it becomes impractical for high-resolution<br>image deblurring tasks. In this work, we propose a unified lightweight CNN<br>network that features a large effective receptive field (ERF) and demonstrates<br>comparable or even better performance than Transformers while bearing less<br>computational costs. Our key design is an efficient CNN block dubbed LaKD,<br>equipped with a large kernel depth-wise convolution and spatial-channel mixing<br>structure, attaining comparable or larger ERF than Transformers but with a<br>smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over<br>the state-of-the-art Restormer on defocus / motion deblurring benchmark<br>datasets with 32% fewer parameters and 39% fewer MACs. 
Extensive experiments<br>demonstrate the superior performance of our network and the effectiveness of<br>each module. Furthermore, we propose a compact and intuitive ERFMeter metric<br>that quantitatively characterizes ERF, and shows a high correlation to the<br>network performance. We hope this work can inspire the research community to<br>further explore the pros and cons of CNN and Transformer architectures beyond<br>image deblurring tasks.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Nsampi, N.E., Djeacoumar, A., Seidel, H.-P., Ritschel, T., and Leimkühler, T. 2023. Neural Field Convolutions by Repeated Differentiation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2023) 42, 6.
Export
BibTeX
@article{Nsampi_SIGGRAPHASIA23,
  title        = {Neural Field Convolutions by Repeated Differentiation},
  author       = {Nsampi, Ntumba Elie and Djeacoumar, Adarsh and Seidel, Hans-Peter and Ritschel, Tobias and Leimk{\"u}hler, Thomas},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3618340},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  journal      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume       = {42},
  number       = {6},
  pages        = {1--11},
  eid          = {206},
  booktitle    = {Proceedings of ACM SIGGRAPH Asia 2023},
}
Endnote
%0 Journal Article %A Nsampi, Ntumba Elie %A Djeacoumar, Adarsh %A Seidel, Hans-Peter %A Ritschel, Tobias %A Leimk&#252;hler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Neural Field Convolutions by Repeated Differentiation : %G eng %U http://hdl.handle.net/21.11116/0000-000E-691E-C %R 10.1145/3618340 %7 2023 %D 2023 %J ACM Transactions on Graphics %V 42 %N 6 %& 1 %P 1 - 11 %Z sequence number: 206 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2023 %O ACM SIGGRAPH Asia 2023 Sydney, Australia, 12-15 December 2023 SA '23 SA 2023
Liao, K., Tricard, T., Piovarči, M., Seidel, H.-P., and Babaei, V. 2023. Learning Deposition Policies for Fused Multi-Material 3D Printing. IEEE International Conference on Robotics and Automation (ICRA 2023), IEEE.
Export
BibTeX
@inproceedings{Liao_ICRA2023,
  title        = {Learning Deposition Policies for Fused Multi-Material {3D} Printing},
  author       = {Liao, Kang and Tricard, Thibault and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Babaei, Vahid},
  language     = {eng},
  isbn         = {979-8-3503-2365-8},
  doi          = {10.1109/ICRA48891.2023.10160465},
  publisher    = {IEEE},
  year         = {2023},
  marginalmark = {$\bullet$},
  booktitle    = {IEEE International Conference on Robotics and Automation (ICRA 2023)},
  pages        = {12345--12352},
  address      = {London, UK},
}
Endnote
%0 Conference Proceedings %A Liao, Kang %A Tricard, Thibault %A Piovar&#269;i, Michal %A Seidel, Hans-Peter %A Babaei, Vahid %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Deposition Policies for Fused Multi-Material 3D Printing : %G eng %U http://hdl.handle.net/21.11116/0000-000C-44C2-C %R 10.1109/ICRA48891.2023.10160465 %D 2023 %B IEEE International Conference on Robotics and Automation %Z date of event: 2023-05-29 - 2023-06-02 %C London, UK %B IEEE International Conference on Robotics and Automation %P 12345 - 12352 %I IEEE %@ 979-8-3503-2365-8
Huang, X., Ritschel, T., Seidel, H.-P., Memari, P., and Singh, G. 2023. Patternshop: Editing Point Patterns by Image Manipulation. ACM Transactions on Graphics 42, 4.
Export
BibTeX
@article{Huang2023,
  title        = {Patternshop: {E}diting Point Patterns by Image Manipulation},
  author       = {Huang, Xingchang and Ritschel, Tobias and Seidel, Hans-Peter and Memari, Pooran and Singh, Gurprit},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3592418},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2023},
  marginalmark = {$\bullet$},
  journal      = {ACM Transactions on Graphics},
  volume       = {42},
  number       = {4},
  pages        = {1--14},
  eid          = {53},
}
Endnote
%0 Journal Article %A Huang, Xingchang %A Ritschel, Tobias %A Seidel, Hans-Peter %A Memari, Pooran %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Patternshop: Editing Point Patterns by Image Manipulation : %G eng %U http://hdl.handle.net/21.11116/0000-000D-FE1D-6 %R 10.1145/3592418 %7 2023 %D 2023 %J ACM Transactions on Graphics %V 42 %N 4 %& 1 %P 1 - 14 %Z sequence number: 53 %I ACM %C New York, NY %@ false
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2023. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. Computer Graphics Forum (Proc. EUROGRAPHICS 2023) 42, 2.
Export
BibTeX
@article{Cogalan_Eurographics23,
  title        = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors},
  author       = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
  language     = {eng},
  issn         = {0167-7055},
  doi          = {10.1111/cgf.14748},
  publisher    = {Blackwell-Wiley},
  address      = {Oxford},
  year         = {2023},
  marginalmark = {$\bullet$},
  journal      = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume       = {42},
  number       = {2},
  pages        = {119--131},
  booktitle    = {The European Association for Computer Graphics 44th Annual Conference (EUROGRAPHICS 2023)},
}
Endnote
%0 Journal Article %A &#199;o&#287;alan, U&#287;ur %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors : %G eng %U http://hdl.handle.net/21.11116/0000-000C-F953-E %R 10.1111/cgf.14748 %7 2023 %D 2023 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 42 %N 2 %& 119 %P 119 - 131 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 44th Annual Conference %O EUROGRAPHICS 2023 EG 2023 Saarbr&#252;cken, Germany, May 8-12, 2023
Chen, B., Jindal, A., Piovarci, M., et al. 2023. The Effect of Display Capabilities on the Gloss Consistency between Real and Virtual Objects. SA ’23: SIGGRAPH Asia 2023 Conference Papers, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggrapha/ChenJPWSDMSM23,
  title        = {The Effect of Display Capabilities on the Gloss Consistency between Real and Virtual Objects},
  author       = {Chen, Bin and Jindal, Akshay and Piovar{\v c}i, Michal and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana and Mantiuk, Rafa{\l} K.},
  language     = {eng},
  isbn         = {979-8-4007-0315-7},
  doi          = {10.1145/3610548.3618226},
  publisher    = {ACM},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  booktitle    = {SA '23: SIGGRAPH Asia 2023 Conference Papers},
  editor       = {Kim, June and Lin, Ming C. and Bickel, Bernd},
  pages        = {1--11},
  eid          = {90},
  address      = {Sydney, Australia},
}
Endnote
%0 Conference Proceedings %A Chen, Bin %A Jindal, Akshay %A Piovarci, Michal %A Wang, Chao %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %A Mantiuk, Rafal K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T The Effect of Display Capabilities on the Gloss Consistency between Real and Virtual Objects : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7BC2-C %R 10.1145/3610548.3618226 %D 2023 %B SIGGRAPH Asia 2023 Conference %Z date of event: 2023-12-12 - 2023-12-15 %C Sydney, Australia %B SA '23: SIGGRAPH Asia 2023 Conference Papers %E Kim, June; Lin, Ming C.; Bickel, Bernd %P 1 - 11 %Z sequence number: 90 %I ACM %@ 979-8-4007-0315-7 %U https://dl.acm.org/doi/pdf/10.1145/3610548.3618226
Bálint, M., Myszkowski, K., Seidel, H.-P., and Singh, G. 2023a. Joint Sampling and Optimisation for Inverse Rendering. SA ’23: SIGGRAPH Asia 2023 Conference Papers, ACM.
Export
BibTeX
@inproceedings{DBLP:journals/corr/abs-2309-15676,
  title        = {Joint Sampling and Optimisation for Inverse Rendering},
  author       = {B{\'a}lint, Martin and Myszkowski, Karol and Seidel, Hans-Peter and Singh, Gurprit},
  language     = {eng},
  isbn         = {979-8-4007-0315-7},
  doi          = {10.1145/3610548.3618244},
  publisher    = {ACM},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  booktitle    = {SA '23: SIGGRAPH Asia 2023 Conference Papers},
  editor       = {Kim, June and Lin, Ming C. and Bickel, Bernd},
  pages        = {1--10},
  eid          = {29},
  address      = {Sydney, Australia},
}
Endnote
%0 Conference Proceedings %A B&#225;lint, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Joint Sampling and Optimisation for Inverse Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7B9B-9 %R 10.1145/3610548.3618244 %D 2023 %B SIGGRAPH Asia 2023 Conference %Z date of event: 2023-12-12 - 2023-12-15 %C Sydney, Australia %B SA '23: SIGGRAPH Asia 2023 Conference Papers %P 1 - 10 %Z sequence number: 29 %I ACM %@ 979-8-4007-0315-7 %U https://dl.acm.org/doi/pdf/10.1145/3610548.3618244
Bálint, M., Wolski, K., Myszkowski, K., Seidel, H.-P., and Mantiuk, R. 2023b. Neural Partitioning Pyramids for Denoising Monte Carlo Renderings. Proceedings SIGGRAPH 2023 Conference Papers, ACM.
Export
BibTeX
@inproceedings{Balint_SIGGRAPH23,
  title        = {Neural Partitioning Pyramids for Denoising {Monte Carlo} Renderings},
  author       = {B{\'a}lint, Martin and Wolski, Krzysztof and Myszkowski, Karol and Seidel, Hans-Peter and Mantiuk, Rafa{\l}},
  language     = {eng},
  isbn         = {979-8-4007-0159-7},
  doi          = {10.1145/3588432.3591562},
  publisher    = {ACM},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  booktitle    = {Proceedings SIGGRAPH 2023 Conference Papers},
  editor       = {Brunvand, Erik and Sheffer, Alla and Wimmer, Michael},
  pages        = {1--11},
  eid          = {60},
  address      = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A B&#225;lint, Martin %A Wolski, Krzysztof %A Myszkowski, Karol %A Seidel, Hans-Peter %A Mantiuk, Rafa&#322; %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Neural Partitioning Pyramids for Denoising Monte Carlo Renderings : %G eng %U http://hdl.handle.net/21.11116/0000-000E-3740-C %R 10.1145/3588432.3591562 %D 2023 %B ACM SIGGRAPH Conference %Z date of event: 2023-08-06 - 2023-08-10 %C Los Angeles, CA, USA %B Proceedings SIGGRAPH 2023 Conference Papers %E Brunvand, Erik; Sheffer, Alla; Wimmer, Michael %P 1 - 11 %Z sequence number: 60 %I ACM %@ 979-8-4007-0159-7
Arabadzhiyska, E., Tursun, C., Seidel, H.-P., and Didyk, P. 2023. Practical Saccade Prediction for Head-mounted Displays: Towards a Comprehensive Model. ACM Transactions on Applied Perception 20, 1.
Export
BibTeX
@article{Arabadzhiyska23,
  title        = {Practical Saccade Prediction for Head-Mounted Displays: {T}owards a Comprehensive Model},
  author       = {Arabadzhiyska, Elena and Tursun, Cara and Seidel, Hans-Peter and Didyk, Piotr},
  language     = {eng},
  issn         = {1544-3558},
  doi          = {10.1145/3568311},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2023},
  marginalmark = {$\bullet$},
  journal      = {ACM Transactions on Applied Perception},
  volume       = {20},
  number       = {1},
  pages        = {1--23},
  eid          = {2},
}
Endnote
%0 Journal Article %A Arabadzhiyska, Elena %A Tursun, Cara %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Practical Saccade Prediction for Head-mounted Displays: Towards a Comprehensive Model : %G eng %U http://hdl.handle.net/21.11116/0000-000C-B76B-E %R 10.1145/3568311 %7 2023 %D 2023 %J ACM Transactions on Applied Perception %V 20 %N 1 %& 1 %P 1 - 23 %Z sequence number: 2 %I ACM %C New York, NY %@ false %U https://dl.acm.org/doi/pdf/10.1145/3568311
Ansari, N., Seidel, H.-P., and Babaei, V. 2023. Large-batch, Iteration-efficient Neural Bayesian Design Optimization. https://arxiv.org/abs/2306.01095.
(arXiv: 2306.01095)
Abstract
Bayesian optimization (BO) provides a powerful framework for optimizing<br>black-box, expensive-to-evaluate functions. It is therefore an attractive tool<br>for engineering design problems, typically involving multiple objectives.<br>Thanks to the rapid advances in fabrication and measurement methods as well as<br>parallel computing infrastructure, querying many design problems can be heavily<br>parallelized. This class of problems challenges BO with an unprecedented setup<br>where it has to deal with very large batches, shifting its focus from sample<br>efficiency to iteration efficiency. We present a novel Bayesian optimization<br>framework specifically tailored to address these limitations. Our key<br>contribution is a highly scalable, sample-based acquisition function that<br>performs a non-dominated sorting of not only the objectives but also their<br>associated uncertainty. We show that our acquisition function in combination<br>with different Bayesian neural network surrogates is effective in<br>data-intensive environments with a minimal number of iterations. We demonstrate<br>the superiority of our method by comparing it with state-of-the-art<br>multi-objective optimizations. We perform our evaluation on two real-world<br>problems -- airfoil design and 3D printing -- showcasing the applicability and<br>efficiency of our approach. Our code is available at:<br>https://github.com/an-on-ym-ous/lbn_mobo<br>
Export
BibTeX
@online{Ansari-et-al_2023,
  title        = {Large-batch, Iteration-efficient Neural Bayesian Design Optimization},
  author       = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid},
  language     = {eng},
  url          = {https://arxiv.org/abs/2306.01095},
  doi          = {10.48550/arXiv.2306.01095},
  eprint       = {2306.01095},
  eprinttype   = {arXiv},
  eprintclass  = {cs.LG},
  year         = {2023},
  marginalmark = {$\bullet$},
  date         = {2023},
  abstract     = {Bayesian optimization (BO) provides a powerful framework for optimizing black-box, expensive-to-evaluate functions. It is therefore an attractive tool for engineering design problems, typically involving multiple objectives. Thanks to the rapid advances in fabrication and measurement methods as well as parallel computing infrastructure, querying many design problems can be heavily parallelized. This class of problems challenges BO with an unprecedented setup where it has to deal with very large batches, shifting its focus from sample efficiency to iteration efficiency. We present a novel Bayesian optimization framework specifically tailored to address these limitations. Our key contribution is a highly scalable, sample-based acquisition function that performs a non-dominated sorting of not only the objectives but also their associated uncertainty. We show that our acquisition function in combination with different Bayesian neural network surrogates is effective in data-intensive environments with a minimal number of iterations. We demonstrate the superiority of our method by comparing it with state-of-the-art multi-objective optimizations. We perform our evaluation on two real-world problems -- airfoil design and 3D printing -- showcasing the applicability and efficiency of our approach. Our code is available at: https://github.com/an-on-ym-ous/lbn_mobo},
}
Endnote
%0 Report %A Ansari, Navid %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Large-batch, Iteration-efficient Neural Bayesian Design Optimization : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7BB3-D %R 10.48550/arXiv.2306.01095 %U https://arxiv.org/abs/2306.01095 %D 2023 %X Bayesian optimization (BO) provides a powerful framework for optimizing<br>black-box, expensive-to-evaluate functions. It is therefore an attractive tool<br>for engineering design problems, typically involving multiple objectives.<br>Thanks to the rapid advances in fabrication and measurement methods as well as<br>parallel computing infrastructure, querying many design problems can be heavily<br>parallelized. This class of problems challenges BO with an unprecedented setup<br>where it has to deal with very large batches, shifting its focus from sample<br>efficiency to iteration efficiency. We present a novel Bayesian optimization<br>framework specifically tailored to address these limitations. Our key<br>contribution is a highly scalable, sample-based acquisition function that<br>performs a non-dominated sorting of not only the objectives but also their<br>associated uncertainty. We show that our acquisition function in combination<br>with different Bayesian neural network surrogates is effective in<br>data-intensive environments with a minimal number of iterations. We demonstrate<br>the superiority of our method by comparing it with state-of-the-art<br>multi-objective optimizations. We perform our evaluation on two real-world<br>problems -- airfoil design and 3D printing -- showcasing the applicability and<br>efficiency of our approach. 
Our code is available at:<br>https://github.com/an-on-ym-ous/lbn_mobo<br> %K Computer Science, Learning, cs.LG,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computational Engineering, Finance, and Science, cs.CE
2022
Wang, C., Serrano, A., Pan, X., et al. 2022a. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. https://arxiv.org/abs/2211.12352.
(arXiv: 2211.12352)
Abstract
Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving<br>as a partial observation of the High Dynamic Range (HDR) visual world. Despite<br>limited dynamic range, these LDR images are often captured with different<br>exposures, implicitly containing information about the underlying HDR image<br>distribution. Inspired by this intuition, in this work we present, to the best<br>of our knowledge, the first method for learning a generative model of HDR<br>images from in-the-wild LDR image collections in a fully unsupervised manner.<br>The key idea is to train a generative adversarial network (GAN) to generate HDR<br>images which, when projected to LDR under various exposures, are<br>indistinguishable from real LDR images. The projection from HDR to LDR is<br>achieved via a camera model that captures the stochasticity in exposure and<br>camera response function. Experiments show that our method GlowGAN can<br>synthesize photorealistic HDR images in many challenging cases such as<br>landscapes, lightning, or windows, where previous supervised generative models<br>produce overexposed images. We further demonstrate the new application of<br>unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does<br>not need HDR images or paired multi-exposure images for training, yet it<br>reconstructs more plausible information for overexposed regions than<br>state-of-the-art supervised learning models trained on such data.<br>
Export
BibTeX
@online{Wang2211.12352,
  title        = {{GlowGAN}: Unsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
  author       = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  language     = {eng},
  url          = {https://arxiv.org/abs/2211.12352},
  doi          = {10.48550/arXiv.2211.12352},
  eprint       = {2211.12352},
  eprinttype   = {arXiv},
  year         = {2022},
  marginalmark = {$\bullet$},
  abstract     = {Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.},
}
Endnote
%0 Report %A Wang, Chao %A Serrano, Ana %A Pan, X. %A Chen, Bin %A Seidel, Hans-Peter %A Theobalt, Christian %A Myszkowski, Karol %A Leimk&#252;hler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild : %G eng %U http://hdl.handle.net/21.11116/0000-000B-9D08-C %U https://arxiv.org/abs/2211.12352 %D 2022 %X Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving<br>as a partial observation of the High Dynamic Range (HDR) visual world. Despite<br>limited dynamic range, these LDR images are often captured with different<br>exposures, implicitly containing information about the underlying HDR image<br>distribution. Inspired by this intuition, in this work we present, to the best<br>of our knowledge, the first method for learning a generative model of HDR<br>images from in-the-wild LDR image collections in a fully unsupervised manner.<br>The key idea is to train a generative adversarial network (GAN) to generate HDR<br>images which, when projected to LDR under various exposures, are<br>indistinguishable from real LDR images. The projection from HDR to LDR is<br>achieved via a camera model that captures the stochasticity in exposure and<br>camera response function. Experiments show that our method GlowGAN can<br>synthesize photorealistic HDR images in many challenging cases such as<br>landscapes, lightning, or windows, where previous supervised generative models<br>produce overexposed images. 
We further demonstrate the new application of<br>unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does<br>not need HDR images or paired multi-exposure images for training, yet it<br>reconstructs more plausible information for overexposed regions than<br>state-of-the-art supervised learning models trained on such data.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2022b. Learning a self-supervised tone mapping operator via feature contrast masking loss. Computer Graphics Forum (Proc. EUROGRAPHICS 2022) 41, 2.
Export
BibTeX
@article{Wang2022,
  title        = {Learning a self-supervised tone mapping operator via feature contrast masking loss},
  author       = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana},
  language     = {eng},
  issn         = {0167-7055},
  doi          = {10.1111/cgf.14459},
  publisher    = {Blackwell-Wiley},
  address      = {Oxford},
  year         = {2022},
  marginalmark = {$\bullet$},
  journal      = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume       = {41},
  number       = {2},
  pages        = {71--84},
  booktitle    = {The European Association for Computer Graphics 43rd Annual Conference (EUROGRAPHICS 2022)},
  editor       = {Chaine, Rapha{\"e}lle and Kim, Min H.},
}
Endnote
%0 Journal Article %A Wang, Chao %A Chen, Bin %A Seidel, Hans-Peter %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning a self-supervised tone mapping operator via feature contrast masking loss : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA09-B %R 10.1111/cgf.14459 %7 2022 %D 2022 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 41 %N 2 %& 71 %P 71 - 84 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 43rd Annual Conference %O EUROGRAPHICS 2022 EG 2022 Reims, France, April 25 - 29, 2022
Salaün, C., Georgiev, I., Seidel, H.-P., and Singh, G. 2022. Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
Export
BibTeX
@article{SalauenSIGGRAPHAsia22,
  title        = {Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport},
  author       = {Sala{\"u}n, Corentin and Georgiev, Iliyan and Seidel, Hans-Peter and Singh, Gurprit},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3550454.3555484},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2022},
  marginalmark = {$\bullet$},
  journal      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume       = {41},
  number       = {6},
  pages        = {1--14},
  eid          = {261},
  booktitle    = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article %A Sala&#252;n, Corentin %A Georgiev, Iliyan %A Seidel, Hans-Peter %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport : %G eng %U http://hdl.handle.net/21.11116/0000-000C-1716-2 %R 10.1145/3550454.3555484 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 6 %& 1 %P 1 - 14 %Z sequence number: 261 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2022 %O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Huang, X., Memari, P., Seidel, H.-P., and Singh, G. 2022. Point-Pattern Synthesis using Gabor and Random Filters. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2022) 41, 4.
Export
BibTeX
@article{Huang_EGSR2022,
  title        = {Point-Pattern Synthesis using {Gabor} and Random Filters},
  author       = {Huang, Xingchang and Memari, Pooran and Seidel, Hans-Peter and Singh, Gurprit},
  language     = {eng},
  issn         = {0167-7055},
  doi          = {10.1111/cgf.14596},
  publisher    = {Blackwell-Wiley},
  address      = {Oxford},
  year         = {2022},
  marginalmark = {$\bullet$},
  date         = {2022},
  journal      = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  volume       = {41},
  number       = {4},
  pages        = {169--179},
  booktitle    = {Eurographics Symposium on Rendering 2022},
  editor       = {Ghosh, Abhijeet and Wei, Li-Yi and Wilkie, Alexander},
}
Endnote
%0 Journal Article %A Huang, Xingchang %A Memari, Pooran %A Seidel, Hans-Peter %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Point-Pattern Synthesis using Gabor and Random Filters : %G eng %U http://hdl.handle.net/21.11116/0000-000C-1675-8 %R 10.1111/cgf.14596 %7 2022 %D 2022 %J Computer Graphics Forum %V 41 %N 4 %& 169 %P 169 - 179 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2022 %O Eurographics Symposium on Rendering 2022 EGSR 2022 Prague, Czech Republic & Virtual ; 4 - 6 July 2022 %U https://onlinelibrary.wiley.com/share/X44DPUPXHCYNCUKSEBEE?target=10.1111/cgf.14596
Hladký, J., Stengel, M., Vining, N., Kerbl, B., Seidel, H.-P., and Steinberger, M. 2022. QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
Export
BibTeX
@article{HladkySIGGRAPHAsia22,
  title        = {QuadStream: {A} Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction},
  author       = {Hladk{\'y}, Jozef and Stengel, Michael and Vining, Nicholas and Kerbl, Bernhard and Seidel, Hans-Peter and Steinberger, Markus},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3550454.3555524},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2022},
  marginalmark = {$\bullet$},
  journal      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume       = {41},
  number       = {6},
  pages        = {1--13},
  eid          = {233},
  booktitle    = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article %A Hladk&#253;, Jozef %A Stengel, Michael %A Vining, Nicholas %A Kerbl, Bernhard %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-000C-208B-3 %R 10.1145/3550454.3555524 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 6 %& 1 %P 1 - 13 %Z sequence number: 233 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2022 %O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2022a. Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures. Computers and Graphics 105.
Export
BibTeX
@article{Cogalan2022,
  title        = {Learning {HDR} Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures},
  author       = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  language     = {eng},
  issn         = {0097-8493},
  doi          = {10.1016/j.cag.2022.04.008},
  publisher    = {Elsevier},
  address      = {Amsterdam},
  year         = {2022},
  marginalmark = {$\bullet$},
  journal      = {Computers and Graphics},
  volume       = {105},
  pages        = {57--72},
}
Endnote
%0 Journal Article %A &#199;o&#287;alan, U&#287;ur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures : %G eng %U http://hdl.handle.net/21.11116/0000-000A-9D95-D %R 10.1016/j.cag.2022.04.008 %7 2022 %D 2022 %J Computers and Graphics %V 105 %& 57 %P 57 - 72 %I Elsevier %C Amsterdam %@ false
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2022b. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. https://arxiv.org/abs/2206.09485.
(arXiv: 2206.09485)
Abstract
Video frame interpolation (VFI) enables many important applications that<br>might involve the temporal domain, such as slow motion playback, or the spatial<br>domain, such as stop motion sequences. We are focusing on the former task,<br>where one of the key challenges is handling high dynamic range (HDR) scenes in<br>the presence of complex motion. To this end, we explore possible advantages of<br>dual-exposure sensors that readily provide sharp short and blurry long<br>exposures that are spatially registered and whose ends are temporally aligned.<br>This way, motion blur registers temporally continuous information on the scene<br>motion that, combined with the sharp reference, enables more precise motion<br>sampling within a single camera shot. We demonstrate that this facilitates a<br>more complex motion reconstruction in the VFI task, as well as HDR frame<br>reconstruction that so far has been considered only for the originally captured<br>frames, not in-between interpolated frames. We design a neural network trained<br>in these tasks that clearly outperforms existing solutions. We also propose a<br>metric for scene motion complexity that provides important insights into the<br>performance of VFI methods at the test time.<br>
Export
BibTeX
@online{Cogalan2206.09485,
  TITLE        = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors},
  AUTHOR       = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2206.09485},
  EPRINT       = {2206.09485},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We are focusing on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore possible advantages of dual-exposure sensors that readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates a more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction that so far has been considered only for the originally captured frames, not in-between interpolated frames. We design a neural network trained in these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at the test time.},
}
Endnote
%0 Report %A &#199;o&#287;alan, U&#287;ur %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors : %G eng %U http://hdl.handle.net/21.11116/0000-000C-16E8-6 %U https://arxiv.org/abs/2206.09485 %D 2022 %X Video frame interpolation (VFI) enables many important applications that<br>might involve the temporal domain, such as slow motion playback, or the spatial<br>domain, such as stop motion sequences. We are focusing on the former task,<br>where one of the key challenges is handling high dynamic range (HDR) scenes in<br>the presence of complex motion. To this end, we explore possible advantages of<br>dual-exposure sensors that readily provide sharp short and blurry long<br>exposures that are spatially registered and whose ends are temporally aligned.<br>This way, motion blur registers temporally continuous information on the scene<br>motion that, combined with the sharp reference, enables more precise motion<br>sampling within a single camera shot. We demonstrate that this facilitates a<br>more complex motion reconstruction in the VFI task, as well as HDR frame<br>reconstruction that so far has been considered only for the originally captured<br>frames, not in-between interpolated frames. We design a neural network trained<br>in these tasks that clearly outperforms existing solutions. We also propose a<br>metric for scene motion complexity that provides important insights into the<br>performance of VFI methods at the test time.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Chu, M., Liu, L., Zheng, Q., et al. 2022. Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data. ACM Transactions on Graphics 41, 4.
Export
BibTeX
@article{Chu2022,
  TITLE        = {Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data},
  AUTHOR       = {Chu, Mengyu and Liu, Lingjie and Zheng, Quan and Franz, Erik and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3528223.3530169},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics},
  VOLUME       = {41},
  NUMBER       = {4},
  PAGES        = {1--14},
  EID          = {119},
}
Endnote
%0 Journal Article %A Chu, Mengyu %A Liu, Lingjie %A Zheng, Quan %A Franz, Erik %A Seidel, Hans-Peter %A Theobalt, Christian %A Zayer, Rhaleb %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data : %G eng %U http://hdl.handle.net/21.11116/0000-000B-6561-6 %R 10.1145/3528223.3530169 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %& 1 %P 1 - 14 %Z sequence number: 119 %I ACM %C New York, NY %@ false %U https://people.mpi-inf.mpg.de/~mchu/projects/PI-NeRF/
Chen, B., Piovarči, M., Wang, C., et al. 2022. Gloss Management for Consistent Reproduction of Real and Virtual Objects. Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022), ACM.
Export
BibTeX
@inproceedings{ChenSA22,
  TITLE        = {Gloss Management for Consistent Reproduction of Real and Virtual Objects},
  AUTHOR       = {Chen, Bin and Piovar{\v c}i, Michal and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana},
  LANGUAGE     = {eng},
  ISBN         = {978-1-4503-9470-3},
  DOI          = {10.1145/3550469.3555406},
  PUBLISHER    = {ACM},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022)},
  EDITOR       = {Jung, Soon Ki and Lee, Jehee and Bargteil, Adam},
  PAGES        = {1--9},
  EID          = {35},
  ADDRESS      = {Daegu, Republic of Korea},
}
Endnote
%0 Conference Proceedings %A Chen, Bin %A Piovarči, Michal %A Wang, Chao %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Gloss Management for Consistent Reproduction of Real and Virtual Objects : %G eng %U http://hdl.handle.net/21.11116/0000-000C-167F-E %R 10.1145/3550469.3555406 %D 2022 %B SIGGRAPH Asia 2022 %Z date of event: 2022-12-06 - 2022-12-09 %C Daegu, Republic of Korea %B Proceedings SIGGRAPH Asia 2022 %E Jung, Soon Ki; Lee, Jehee; Bargteil, Adam %P 1 - 9 %Z sequence number: 35 %I ACM %@ 978-1-4503-9470-3
Bemana, M., Myszkowski, K., Frisvad, J.R., Seidel, H.-P., and Ritschel, T. 2022. Eikonal Fields for Refractive Novel-View Synthesis. Proceedings SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022), ACM.
Export
BibTeX
@inproceedings{Bemana_SIGGRAPH22,
  TITLE        = {Eikonal Fields for Refractive Novel-View Synthesis},
  AUTHOR       = {Bemana, Mojtaba and Myszkowski, Karol and Frisvad, Jeppe Revall and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE     = {eng},
  ISBN         = {978-1-4503-9337-9},
  DOI          = {10.1145/3528233.3530706},
  PUBLISHER    = {ACM},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Proceedings SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022)},
  EDITOR       = {Nandigjav, Munkhtsetseg and Mitra, Niloy J. and Hertzmann, Aaron},
  PAGES        = {1--9},
  EID          = {39},
  ADDRESS      = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings %A Bemana, Mojtaba %A Myszkowski, Karol %A Frisvad, Jeppe Revall %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Eikonal Fields for Refractive Novel-View Synthesis : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA61-7 %R 10.1145/3528233.3530706 %D 2022 %B ACM SIGGRAPH %Z date of event: 2022-08-07 - 2022-08-11 %C Vancouver, Canada %B Proceedings SIGGRAPH 2022 Conference Papers Proceedings %E Nandigjav, Munkhtsetseg; Mitra, Niloy J.; Hertzmann, Aaron %P 1 - 9 %Z sequence number: 39 %I ACM %@ 978-1-4503-9337-9
Arabadzhiyska, E., Tursun, C., Seidel, H.-P., and Didyk, P. 2022. Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model. https://arxiv.org/abs/2205.01624.
(arXiv: 2205.01624)
Abstract
Eye-tracking technology is an integral component of new display devices such<br>as virtual and augmented reality headsets. Applications of gaze information<br>range from new interaction techniques exploiting eye patterns to<br>gaze-contingent digital content creation. However, system latency is still a<br>significant issue in many of these applications because it breaks the<br>synchronization between the current and measured gaze positions. Consequently,<br>it may lead to unwanted visual artifacts and degradation of user experience. In<br>this work, we focus on foveated rendering applications where the quality of an<br>image is reduced towards the periphery for computational savings. In foveated<br>rendering, the presence of latency leads to delayed updates to the rendered<br>frame, making the quality degradation visible to the user. To address this<br>issue and to combat system latency, recent work proposes to use saccade landing<br>position prediction to extrapolate the gaze information from delayed<br>eye-tracking samples. While the benefits of such a strategy have already been<br>demonstrated, the solutions range from simple and efficient ones, which make<br>several assumptions about the saccadic eye movements, to more complex and<br>costly ones, which use machine learning techniques. Yet, it is unclear to what<br>extent the prediction can benefit from accounting for additional factors. This<br>paper presents a series of experiments investigating the importance of<br>different factors for saccades prediction in common virtual and augmented<br>reality applications. In particular, we investigate the effects of saccade<br>orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their<br>influence compares to the variability across users. We also present a simple<br>yet efficient correction method that adapts the existing saccade prediction<br>methods to handle these factors without performing extensive data collection.<br>
Export
BibTeX
@online{Arabadzhiyska2205.01624,
  TITLE        = {Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model},
  AUTHOR       = {Arabadzhiyska, Elena and Tursun, Cara and Seidel, Hans-Peter and Didyk, Piotr},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2205.01624},
  EPRINT       = {2205.01624},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection.},
}
Endnote
%0 Report %A Arabadzhiyska, Elena %A Tursun, Cara %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model : %G eng %U http://hdl.handle.net/21.11116/0000-000C-16E3-B %U https://arxiv.org/abs/2205.01624 %D 2022 %X Eye-tracking technology is an integral component of new display devices such<br>as virtual and augmented reality headsets. Applications of gaze information<br>range from new interaction techniques exploiting eye patterns to<br>gaze-contingent digital content creation. However, system latency is still a<br>significant issue in many of these applications because it breaks the<br>synchronization between the current and measured gaze positions. Consequently,<br>it may lead to unwanted visual artifacts and degradation of user experience. In<br>this work, we focus on foveated rendering applications where the quality of an<br>image is reduced towards the periphery for computational savings. In foveated<br>rendering, the presence of latency leads to delayed updates to the rendered<br>frame, making the quality degradation visible to the user. To address this<br>issue and to combat system latency, recent work proposes to use saccade landing<br>position prediction to extrapolate the gaze information from delayed<br>eye-tracking samples. While the benefits of such a strategy have already been<br>demonstrated, the solutions range from simple and efficient ones, which make<br>several assumptions about the saccadic eye movements, to more complex and<br>costly ones, which use machine learning techniques. Yet, it is unclear to what<br>extent the prediction can benefit from accounting for additional factors. 
This<br>paper presents a series of experiments investigating the importance of<br>different factors for saccades prediction in common virtual and augmented<br>reality applications. In particular, we investigate the effects of saccade<br>orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their<br>influence compares to the variability across users. We also present a simple<br>yet efficient correction method that adapts the existing saccade prediction<br>methods to handle these factors without performing extensive data collection.<br> %K Computer Science, Human-Computer Interaction, cs.HC,Computer Science, Graphics, cs.GR
Ansari, N., Seidel, H.-P., and Babaei, V. 2022a. Mixed Integer Neural Inverse Design. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
Export
BibTeX
@article{Ansari22,
  TITLE        = {Mixed Integer Neural Inverse Design},
  AUTHOR       = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3528223.3530083},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {41},
  NUMBER       = {4},
  PAGES        = {1--14},
  EID          = {151},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2022},
}
Endnote
%0 Journal Article %A Ansari, Navid %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Neural Inverse Design : %G eng %U http://hdl.handle.net/21.11116/0000-000C-1678-5 %R 10.1145/3528223.3530083 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %& 1 %P 1 - 14 %Z sequence number: 151 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2022 %O ACM SIGGRAPH 2022
Ansari, N., Seidel, H.-P., Vahidi Ferdowsi, N., and Babaei, V. 2022b. Autoinverse: Uncertainty Aware Inversion of Neural Networks. Advances in Neural Information Processing Systems 35 (NeurIPS 2022), Curran Associates, Inc.
Export
BibTeX
@inproceedings{Ansari_Neurips22,
  TITLE        = {Autoinverse: {Uncertainty} Aware Inversion of Neural Networks},
  AUTHOR       = {Ansari, Navid and Seidel, Hans-Peter and Vahidi Ferdowsi, Nima and Babaei, Vahid},
  LANGUAGE     = {eng},
  PUBLISHER    = {Curran Associates, Inc.},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022)},
  EDITOR       = {Koyejo, S. and Mohamed, S. and Agarwal, A. and Belgrave, D. and Cho, K. and Oh, A.},
  PAGES        = {8675--8686},
  ADDRESS      = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings %A Ansari, Navid %A Seidel, Hans-Peter %A Vahidi Ferdowsi, Nima %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Autoinverse: Uncertainty Aware Inversion of Neural Networks : %G eng %U http://hdl.handle.net/21.11116/0000-000C-16F6-6 %D 2022 %B 36th Conference on Neural Information Processing Systems %Z date of event: 2022-11-28 - 2022-12-09 %C New Orleans, LA, USA %B Advances in Neural Information Processing Systems 35 %E Koyejo, S.; Mohamed, S.; Agarwal, A.; Belgrave, D.; Cho, K.; Oh, A. %P 8675 - 8686 %I Curran Associates, Inc %U https://openreview.net/pdf?id=dNyCj1AbOb
2021
Zheng, Q., Singh, G., and Seidel, H.-P. 2021. Neural Relightable Participating Media Rendering. Advances in Neural Information Processing Systems 34 (NeurIPS 2021), Curran Associates, Inc.
Export
BibTeX
@inproceedings{Zheng_Neurips2021,
  TITLE        = {Neural Relightable Participating Media Rendering},
  AUTHOR       = {Zheng, Quan and Singh, Gurprit and Seidel, Hans-Peter},
  LANGUAGE     = {eng},
  ISBN         = {9781713845393},
  PUBLISHER    = {Curran Associates, Inc.},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Advances in Neural Information Processing Systems 34 (NeurIPS 2021)},
  EDITOR       = {Ranzato, M. and Beygelzimer, A. and Liang, P. S. and Vaughan, J. W. and Dauphin, Y.},
  PAGES        = {15203--15215},
  ADDRESS      = {Virtual},
}
Endnote
%0 Conference Proceedings %A Zheng, Quan %A Singh, Gurprit %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Relightable Participating Media Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0009-7117-E %D 2021 %B 35th Conference on Neural Information Processing Systems %Z date of event: 2021-12-06 - 2021-12-14 %C Virtual %B Advances in Neural Information Processing Systems 34 %E Ranzato, M.; Beygelzimer, A.; Liang, P. S.; Vaughan, J. W.; Dauphin, Y. %P 15203 - 15215 %I Curran Associates, Inc. %@ 9781713845393
Yenamandra, T., Tewari, A., Bernard, F., et al. 2021. i3DMM: Deep Implicit 3D Morphable Model of Human Heads. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Yenamandra_CVPR2021,
  TITLE        = {{i3DMM}: {D}eep Implicit {3D} Morphable Model of Human Heads},
  AUTHOR       = {Yenamandra, Tarun and Tewari, Ayush and Bernard, Florian and Seidel, Hans-Peter and Elgharib, Mohamed and Cremers, Daniel and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-4509-2},
  DOI          = {10.1109/CVPR46437.2021.01261},
  PUBLISHER    = {IEEE},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
  PAGES        = {12803--12813},
  ADDRESS      = {Virtual Conference},
}
Endnote
%0 Conference Proceedings %A Yenamandra, Tarun %A Tewari, Ayush %A Bernard, Florian %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Cremers, Daniel %A Theobalt, Christian %+ External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T i3DMM: Deep Implicit 3D Morphable Model of Human Heads : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8966-B %R 10.1109/CVPR46437.2021.01261 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 12803 - 12813 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/i3DMM/
Weinrauch, A., Seidel, H.-P., Mlakar, D., Steinberger, M., and Zayer, R. 2021. A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces. https://arxiv.org/abs/2105.13168.
(arXiv: 2105.13168)
Abstract
The humble loop shrinking property played a central role in the inception of<br>modern topology but it has been eclipsed by more abstract algebraic formalism.<br>This is particularly true in the context of detecting relevant non-contractible<br>loops on surfaces where elaborate homological and/or graph theoretical<br>constructs are favored in algorithmic solutions. In this work, we devise a<br>variational analogy to the loop shrinking property and show that it yields a<br>simple, intuitive, yet powerful solution allowing a streamlined treatment of<br>the problem of handle and tunnel loop detection. Our formalization tracks the<br>evolution of a diffusion front randomly initiated on a single location on the<br>surface. Capitalizing on a diffuse interface representation combined with a set<br>of rules for concurrent front interactions, we develop a dynamic data structure<br>for tracking the evolution on the surface encoded as a sparse matrix which<br>serves for performing both diffusion numerics and loop detection and acts as<br>the workhorse of our fully parallel implementation. The substantiated results<br>suggest our approach outperforms state of the art and robustly copes with<br>highly detailed geometric models. As a byproduct, our approach can be used to<br>construct Reeb graphs by diffusion thus avoiding commonly encountered issues<br>when using Morse functions.<br>
Export
BibTeX
@online{Weinrauch_2105.13168,
  TITLE        = {A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and {Reeb} Graph Construction on Surfaces},
  AUTHOR       = {Weinrauch, Alexander and Seidel, Hans-Peter and Mlakar, Daniel and Steinberger, Markus and Zayer, Rhaleb},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2105.13168},
  EPRINT       = {2105.13168},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {The humble loop shrinking property played a central role in the inception of modern topology but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion thus avoiding commonly encountered issues when using Morse functions.},
}
Endnote
%0 Report %A Weinrauch, Alexander %A Seidel, Hans-Peter %A Mlakar, Daniel %A Steinberger, Markus %A Zayer, Rhaleb %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0009-70EE-D %U https://arxiv.org/abs/2105.13168 %D 2021 %X The humble loop shrinking property played a central role in the inception of<br>modern topology but it has been eclipsed by more abstract algebraic formalism.<br>This is particularly true in the context of detecting relevant non-contractible<br>loops on surfaces where elaborate homological and/or graph theoretical<br>constructs are favored in algorithmic solutions. In this work, we devise a<br>variational analogy to the loop shrinking property and show that it yields a<br>simple, intuitive, yet powerful solution allowing a streamlined treatment of<br>the problem of handle and tunnel loop detection. Our formalization tracks the<br>evolution of a diffusion front randomly initiated on a single location on the<br>surface. Capitalizing on a diffuse interface representation combined with a set<br>of rules for concurrent front interactions, we develop a dynamic data structure<br>for tracking the evolution on the surface encoded as a sparse matrix which<br>serves for performing both diffusion numerics and loop detection and acts as<br>the workhorse of our fully parallel implementation. The substantiated results<br>suggest our approach outperforms state of the art and robustly copes with<br>highly detailed geometric models. 
As a byproduct, our approach can be used to<br>construct Reeb graphs by diffusion thus avoiding commonly encountered issues<br>when using Morse functions.<br> %K Computer Science, Graphics, cs.GR,Computer Science, Computational Geometry, cs.CG,Mathematics, Algebraic Topology, math.AT
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2021. Learning a self-supervised tone mapping operator via feature contrast masking loss. https://arxiv.org/abs/2110.09866.
(arXiv: 2110.09866)
Abstract
High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid<br>development of capture technologies. Nevertheless, the dynamic range of common<br>display devices is still limited, therefore tone mapping (TM) remains a key<br>challenge for image visualization. Recent work has demonstrated that neural<br>networks can achieve remarkable performance in this task when compared to<br>traditional methods, however, the quality of the results of these<br>learning-based methods is limited by the training data. Most existing works use<br>as training set a curated selection of best-performing results from existing<br>traditional tone mapping operators (often guided by a quality metric),<br>therefore, the quality of newly generated results is fundamentally limited by<br>the performance of such operators. This quality might be even further limited<br>by the pool of HDR content that is used for training. In this work we propose a<br>learning-based self-supervised tone mapping operator that is trained at test<br>time specifically for each HDR image and does not need any data labeling. The<br>key novelty of our approach is a carefully designed loss function built upon<br>fundamental knowledge on contrast perception that allows for directly comparing<br>the content in the HDR and tone mapped images. We achieve this goal by<br>reformulating classic VGG feature maps into feature contrast maps that<br>normalize local feature differences by their average magnitude in a local<br>neighborhood, allowing our loss to account for contrast masking effects. We<br>perform extensive ablation studies and exploration of parameters and<br>demonstrate that our solution outperforms existing approaches with a single set<br>of fixed parameters, as confirmed by both objective and subjective metrics.<br>
Export
BibTeX
@online{Wang_2110.09866,
  TITLE        = {Learning a self-supervised tone mapping operator via feature contrast masking loss},
  AUTHOR       = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2110.09866},
  EPRINT       = {2110.09866},
  EPRINTTYPE   = {arXiv},
  EPRINTCLASS  = {cs.CV},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited, therefore tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods, however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric), therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge on contrast perception that allows for directly comparing the content in the HDR and tone mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.},
}
Endnote
%0 Report %A Wang, Chao %A Chen, Bin %A Seidel, Hans-Peter %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning a self-supervised tone mapping operator via feature contrast masking loss : %G eng %U http://hdl.handle.net/21.11116/0000-0009-710E-9 %U https://arxiv.org/abs/2110.09866 %D 2021 %X High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid<br>development of capture technologies. Nevertheless, the dynamic range of common<br>display devices is still limited, therefore tone mapping (TM) remains a key<br>challenge for image visualization. Recent work has demonstrated that neural<br>networks can achieve remarkable performance in this task when compared to<br>traditional methods, however, the quality of the results of these<br>learning-based methods is limited by the training data. Most existing works use<br>as training set a curated selection of best-performing results from existing<br>traditional tone mapping operators (often guided by a quality metric),<br>therefore, the quality of newly generated results is fundamentally limited by<br>the performance of such operators. This quality might be even further limited<br>by the pool of HDR content that is used for training. In this work we propose a<br>learning-based self-supervised tone mapping operator that is trained at test<br>time specifically for each HDR image and does not need any data labeling. The<br>key novelty of our approach is a carefully designed loss function built upon<br>fundamental knowledge on contrast perception that allows for directly comparing<br>the content in the HDR and tone mapped images. 
We achieve this goal by<br>reformulating classic VGG feature maps into feature contrast maps that<br>normalize local feature differences by their average magnitude in a local<br>neighborhood, allowing our loss to account for contrast masking effects. We<br>perform extensive ablation studies and exploration of parameters and<br>demonstrate that our solution outperforms existing approaches with a single set<br>of fixed parameters, as confirmed by both objective and subjective metrics.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV
Serrano, A., Chen, B., Wang, C., et al. 2021. The Effect of Shape and Illumination on Material Perception: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{SIGG2021_Materials,
  TITLE        = {The Effect of Shape and Illumination on Material Perception: Model and Applications},
  AUTHOR       = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3450626.3459813},
  PUBLISHER    = {Association for Computing Machinery},
  ADDRESS      = {New York, NY},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  DATE         = {2021},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {40},
  NUMBER       = {4},
  PAGES        = {1--16},
  EID          = {125},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2021},
}
Endnote
%0 Journal Article %A Serrano, Ana %A Chen, Bin %A Wang, Chao %A Piovar&#269;i, Michal %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T The Effect of Shape and Illumination on Material Perception: Model and Applications : Model and Applications %G eng %U http://hdl.handle.net/21.11116/0000-0009-0565-0 %R 10.1145/3450626.3459813 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 16 %Z sequence number: 125 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Rudnev, V., Golyanik, V., Wang, J., et al. 2021. EventHands: Real-Time Neural 3D Hand Pose Estimation from an Event Stream. ICCV 2021, IEEE.
Export
BibTeX
@inproceedings{Rudnev_2021_ICCV,
  TITLE        = {{EventHands}: {R}eal-Time Neural {3D} Hand Pose Estimation from an Event Stream},
  AUTHOR       = {Rudnev, Viktor and Golyanik, Vladislav and Wang, Jiayi and Seidel, Hans-Peter and Mueller, Franziska and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-2812-5},
  DOI          = {10.1109/ICCV48922.2021.01216},
  PUBLISHER    = {IEEE},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {ICCV 2021},
  PAGES        = {12365--12375},
  ADDRESS      = {Montreal, QC, Canada},
}
Endnote
%0 Conference Proceedings %A Rudnev, Viktor %A Golyanik, Vladislav %A Wang, Jiayi %A Seidel, Hans-Peter %A Mueller, Franziska %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T EventHands: Real-Time Neural 3D Hand Pose Estimation from an Event Stream : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B709-1 %R 10.1109/ICCV48922.2021.01216 %D 2021 %B IEEE/CVF International Conference on Computer Vision %Z date of event: 2021-10-10 - 2021-10-17 %C Montreal, QC, Canada %B ICCV 2021 %P 12365 - 12375 %I IEEE %@ 978-1-6654-2812-5
Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021. Differentiable Event Stream Simulator for Non-Rigid 3D Tracking. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Nehvi_CVPR2021Workshop,
  TITLE        = {Differentiable Event Stream Simulator for Non-Rigid {3D} Tracking},
  AUTHOR       = {Nehvi, Jalees and Golyanik, Vladislav and Mueller, Franziska and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-4899-4},
  DOI          = {10.1109/CVPRW53098.2021.00143},
  PUBLISHER    = {IEEE},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021)},
  PAGES        = {1302--1311},
  ADDRESS      = {Nashville, TN, USA},
}
Endnote
%0 Conference Proceedings %A Nehvi, Jalees %A Golyanik, Vladislav %A Mueller, Franziska %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Differentiable Event Stream Simulator for Non-Rigid 3D Tracking : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8957-C %R 10.1109/CVPRW53098.2021.00143 %D 2021 %B 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops %Z date of event: 2021-06-19 - 2021-06-25 %C Nashville, TN, USA %B Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops %P 1302 - 1311 %I IEEE %@ 978-1-6654-4899-4 %U https://gvv.mpi-inf.mpg.de/projects/Event-based_Non-rigid_3D_Tracking/
Mallikarjun B R, Tewari, A., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021a. Learning Complete 3D Morphable Face Models from Images and Videos. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Mallikarjun_CVPR2021b,
  TITLE        = {Learning Complete {3D} Morphable Face Models from Images and Videos},
  AUTHOR       = {{Mallikarjun B R} and Tewari, Ayush and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-4509-2},
  DOI          = {10.1109/CVPR46437.2021.00337},
  PUBLISHER    = {IEEE},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
  PAGES        = {3361--3371},
  ADDRESS      = {Virtual Conference},
}
Endnote
%0 Conference Proceedings %A Mallikarjun B R, %A Tewari, Ayush %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Complete 3D Morphable Face Models from Images and Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8926-3 %R 10.1109/CVPR46437.2021.00337 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 3361 - 3371 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/LeMoMo/
Mallikarjun B R, Tewari, A., Oh, T.-H., et al. 2021b. Monocular Reconstruction of Neural Face Reflectance Fields. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Mallikarjun_CVPR2021,
  TITLE        = {Monocular Reconstruction of Neural Face Reflectance Fields},
  AUTHOR       = {{Mallikarjun B R} and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-4509-2},
  DOI          = {10.1109/CVPR46437.2021.00476},
  PUBLISHER    = {IEEE},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
  PAGES        = {4791--4800},
  ADDRESS      = {Virtual Conference},
}
Endnote
%0 Conference Proceedings %A Mallikarjun B R, %A Tewari, Ayush %A Oh, Tae-Hyun %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Monocular Reconstruction of Neural Face Reflectance Fields : %G eng %U http://hdl.handle.net/21.11116/0000-0008-88FB-4 %R 10.1109/CVPR46437.2021.00476 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 4791 - 4800 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/FaceReflectanceFields/
Mallikarjun B R, Tewari, A., Dib, A., et al. 2021c. PhotoApp: Photorealistic Appearance Editing of Head Portraits. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{MallikarjunBR2021,
  TITLE        = {{PhotoApp}: {P}hotorealistic Appearance Editing of Head Portraits},
  AUTHOR       = {{Mallikarjun B R} and Tewari, Ayush and Dib, Abdallah and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Chevallier, Louis and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3450626.3459765},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {40},
  NUMBER       = {4},
  PAGES        = {1--16},
  EID          = {44},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2021},
}
Endnote
%0 Journal Article %A Mallikarjun B R, %A Tewari, Ayush %A Dib, Abdallah %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Chevallier, Louis %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T PhotoApp: Photorealistic Appearance Editing of Head Portraits : %G eng %U http://hdl.handle.net/21.11116/0000-0009-2A9B-A %R 10.1145/3450626.3459765 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 16 %Z sequence number: 44 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Kappel, M., Golyanik, V., Elgharib, M., et al. 2021. High-Fidelity Neural Human Motion Transfer from Monocular Video. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Kappel_CVPR2021,
  TITLE        = {High-Fidelity Neural Human Motion Transfer from Monocular Video},
  AUTHOR       = {Kappel, Moritz and Golyanik, Vladislav and Elgharib, Mohamed and Henningson, Jann-Ole and Seidel, Hans-Peter and Castillo, Susana and Theobalt, Christian and Magnor, Marcus A.},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-4509-2},
  DOI          = {10.1109/CVPR46437.2021.00159},
  PUBLISHER    = {IEEE},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
  PAGES        = {1541--1550},
  ADDRESS      = {Virtual Conference},
}
Endnote
%0 Conference Proceedings %A Kappel, Moritz %A Golyanik, Vladislav %A Elgharib, Mohamed %A Henningson, Jann-Ole %A Seidel, Hans-Peter %A Castillo, Susana %A Theobalt, Christian %A Magnor, Marcus A. %+ External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations %T High-Fidelity Neural Human Motion Transfer from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8947-E %R 10.1109/CVPR46437.2021.00159 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 1541 - 1550 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/NHMT/
Jiang, C., Tang, C., Seidel, H.-P., Chen, R., and Wonka, P. 2021. Computational Design of Lightweight Trusses. Computer-Aided Design 141.
Export
BibTeX
@article{Jiang2021,
  TITLE        = {Computational Design of Lightweight Trusses},
  AUTHOR       = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Chen, Renjie and Wonka, Peter},
  ISSN         = {0010-4485},
  DOI          = {10.1016/j.cad.2021.103076},
  PUBLISHER    = {Elsevier},
  ADDRESS      = {Amsterdam},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {Computer-Aided Design},
  VOLUME       = {141},
  PAGES        = {1--11},
  EID          = {103076},
}
Endnote
%0 Journal Article %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Chen, Renjie %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Computational Design of Lightweight Trusses : %U http://hdl.handle.net/21.11116/0000-0009-70C2-D %R 10.1016/j.cad.2021.103076 %7 2021 %D 2021 %J Computer-Aided Design %V 141 %& 1 %P 1 - 11 %Z sequence number: 103076 %I Elsevier %C Amsterdam %@ false
Hladký, J., Seidel, H.-P., and Steinberger, M. 2021. SnakeBinning: Efficient Temporally Coherent Triangle Packing for Shading Streaming. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
Export
BibTeX
@article{10.1111:cgf.142648,
  TITLE        = {{SnakeBinning}: {E}fficient Temporally Coherent Triangle Packing for Shading Streaming},
  AUTHOR       = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE     = {eng},
  ISSN         = {0167-7055},
  DOI          = {10.1111/cgf.142648},
  PUBLISHER    = {Blackwell-Wiley},
  ADDRESS      = {Oxford},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  DATE         = {2021},
  JOURNAL      = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME       = {40},
  NUMBER       = {2},
  PAGES        = {475--488},
  BOOKTITLE    = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)},
  EDITOR       = {Mitra, Niloy and Viola, Ivan},
}
Endnote
%0 Journal Article %A Hladk&#253;, Jozef %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T SnakeBinning: Efficient Temporally Coherent Triangle Packing for Shading Streaming : %G eng %U http://hdl.handle.net/21.11116/0000-0008-7AFD-3 %R 10.1111/cgf.142648 %7 2021 %D 2021 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 40 %N 2 %& 475 %P 475 - 488 %I Blackwell-Wiley %C Oxford %@ false %B 42nd Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2021 EG 2021
Habibie, I., Xu, W., Mehta, D., et al. 2021a. Learning Speech-driven 3D Conversational Gestures from Video. https://arxiv.org/abs/2102.06837.
(arXiv: 2102.06837)
Abstract
We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.
Export
BibTeX
@online{Habibie_2102.06837,
  TITLE        = {Learning Speech-driven {3D} Conversational Gestures from Video},
  AUTHOR       = {Habibie, Ikhsanul and Xu, Weipeng and Mehta, Dushyant and Liu, Lingjie and Seidel, Hans-Peter and Pons-Moll, Gerard and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2102.06837},
  EPRINT       = {2102.06837},
  EPRINTTYPE   = {arXiv},
  EPRINTCLASS  = {cs.CV},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.},
}
Endnote
%0 Report %A Habibie, Ikhsanul %A Xu, Weipeng %A Mehta, Dushyant %A Liu, Lingjie %A Seidel, Hans-Peter %A Pons-Moll, Gerard %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Speech-driven 3D Conversational Gestures from Video : %G eng %U http://hdl.handle.net/21.11116/0000-0009-70C7-8 %U https://arxiv.org/abs/2102.06837 %D 2021 %X We propose the first approach to automatically and jointly synthesize both<br>the synchronous 3D conversational body and hand gestures, as well as 3D face<br>and head animations, of a virtual character from speech input. Our algorithm<br>uses a CNN architecture that leverages the inherent correlation between facial<br>expression and hand gestures. Synthesis of conversational body gestures is a<br>multi-modal problem since many similar gestures can plausibly accompany the<br>same input speech. To synthesize plausible body gestures in this setting, we<br>train a Generative Adversarial Network (GAN) based model that measures the<br>plausibility of the generated sequences of 3D body motion when paired with the<br>input audio features. We also contribute a new way to create a large corpus of<br>more than 33 hours of annotated body, hand, and face data from in-the-wild<br>videos of talking people. To this end, we apply state-of-the-art monocular<br>approaches for 3D body and hand pose estimation as well as dense 3D face<br>performance capture to the video corpus. 
In this way, we can train on orders of<br>magnitude more data than previous algorithms that resort to complex in-studio<br>motion capture solutions, and thereby train more expressive synthesis<br>algorithms. Our experiments and user study show the state-of-the-art quality of<br>our speech-synthesized full 3D character animations.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Habibie, I., Xu, W., Mehta, D., et al. 2021b. Learning Speech-driven 3D Conversational Gestures from Video. Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents (IVA 2021), ACM.
Export
BibTeX
@inproceedings{Habibie_IVA2021,
  TITLE        = {Learning Speech-driven {3D} Conversational Gestures from Video},
  AUTHOR       = {Habibie, Ikhsanul and Xu, Weipeng and Mehta, Dushyant and Liu, Lingjie and Seidel, Hans-Peter and Pons-Moll, Gerard and Elgharib, Mohamed and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {9781450386197},
  DOI          = {10.1145/3472306.3478335},
  PUBLISHER    = {ACM},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents (IVA 2021)},
  PAGES        = {101--108},
  ADDRESS      = {Virtual Event, Japan},
}
Endnote
%0 Conference Proceedings %A Habibie, Ikhsanul %A Xu, Weipeng %A Mehta, Dushyant %A Liu, Lingjie %A Seidel, Hans-Peter %A Pons-Moll, Gerard %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Speech-driven 3D Conversational Gestures from Video : %G eng %U http://hdl.handle.net/21.11116/0000-0009-4D19-6 %R 10.1145/3472306.3478335 %D 2021 %B 21st ACM International Conference on Intelligent Virtual Agents %Z date of event: 2021-09-14 - 2021-09-17 %C Virtual Event, Japan %B Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents %P 101 - 108 %I ACM %@ 9781450386197
Chu, M., Thuerey, N., Seidel, H.-P., Theobalt, C., and Zayer, R. 2021. Learning Meaningful Controls for Fluids. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{Chu2021,
  TITLE        = {Learning Meaningful Controls for Fluids},
  AUTHOR       = {Chu, Mengyu and Thuerey, Nils and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3450626.3459845},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {40},
  NUMBER       = {4},
  PAGES        = {1--13},
  EID          = {100},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2021},
}
Endnote
%0 Journal Article %A Chu, Mengyu %A Thuerey, Nils %A Seidel, Hans-Peter %A Theobalt, Christian %A Zayer, Rhaleb %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Meaningful Controls for Fluids : %G eng %U http://hdl.handle.net/21.11116/0000-0009-4B91-F %R 10.1145/3450626.3459845 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 13 %Z sequence number: 100 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Chen, B., Wang, C., Piovarči, M., et al. 2021. The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories. The Visual Computer 37.
Export
BibTeX
@article{Chen2021b,
  TITLE        = {The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories},
  AUTHOR       = {Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana},
  LANGUAGE     = {eng},
  ISSN         = {0178-2789},
  DOI          = {10.1007/s00371-021-02227-x},
  PUBLISHER    = {Springer},
  ADDRESS      = {Berlin},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {The Visual Computer},
  VOLUME       = {37},
  PAGES        = {2975--2987},
}
Endnote
%0 Journal Article %A Chen, Bin %A Wang, Chao %A Piovar&#269;i, Michal %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories : %G eng %U http://hdl.handle.net/21.11116/0000-0008-F05C-2 %R 10.1007/s00371-021-02227-x %7 2021 %D 2021 %J The Visual Computer %V 37 %& 2975 %P 2975 - 2987 %I Springer %C Berlin %@ false
Ansari, N., Seidel, H.-P., and Babaei, V. 2021. Mixed Integer Neural Inverse Design. https://arxiv.org/abs/2109.12888.
(arXiv: 2109.12888)
Abstract
In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.
Export
BibTeX
@online{Ansari_2109.12888,
  TITLE        = {Mixed Integer Neural Inverse Design},
  AUTHOR       = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2109.12888},
  EPRINT       = {2109.12888},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.},
}
Endnote
%0 Report %A Ansari, Navid %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Neural Inverse Design : %G eng %U http://hdl.handle.net/21.11116/0000-0009-7104-3 %U https://arxiv.org/abs/2109.12888 %D 2021 %X In computational design and fabrication, neural networks are becoming<br>important surrogates for bulky forward simulations. A long-standing,<br>intertwined question is that of inverse design: how to compute a design that<br>satisfies a desired target performance? Here, we show that the piecewise linear<br>property, very common in everyday neural networks, allows for an inverse design<br>formulation based on mixed-integer linear programming. Our mixed-integer<br>inverse design uncovers globally optimal or near optimal solutions in a<br>principled manner. Furthermore, our method significantly facilitates emerging,<br>but challenging, combinatorial inverse design tasks, such as material<br>selection. For problems where finding the optimal solution is not desirable or<br>tractable, we develop an efficient yet near-optimal hybrid optimization.<br>Eventually, our method is able to find solutions provably robust to possible<br>fabrication perturbations among multiple designs with similar performances.<br> %K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
2020
Zheng, Q., Babaei, V., Wetzstein, G., Seidel, H.-P., Zwicker, M., and Singh, G. 2020. Neural Light Field 3D Printing. ACM Transactions on Graphics (Proc. SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Zheng_TOG2020,
  TITLE     = {Neural Light Field {3D} Printing},
  AUTHOR    = {Zheng, Quan and Babaei, Vahid and Wetzstein, Gordon and Seidel, Hans-Peter and Zwicker, Matthias and Singh, Gurprit},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/3414685.3417879},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2020},
  JOURNAL   = {ACM Transactions on Graphics (Proc. SIGGRAPH Asia)},
  VOLUME    = {39},
  NUMBER    = {6},
  EID       = {207},
  BOOKTITLE = {Proceedings of the SIGGRAPH Asia 2020},
  EDITOR    = {Myszkowski, Karol},
}
Endnote
%0 Journal Article %A Zheng, Quan %A Babaei, Vahid %A Wetzstein, Gordon %A Seidel, Hans-Peter %A Zwicker, Matthias %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Light Field 3D Printing : %U http://hdl.handle.net/21.11116/0000-0007-9AA8-E %R 10.1145/3414685.3417879 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 207 %I ACM %C New York, NY %@ false %B Proceedings of the SIGGRAPH Asia 2020 %O SIGGRAPH Asia 2020 SA'20 SA 2020
Yu, Y., Meka, A., Elgharib, M., Seidel, H.-P., Theobalt, C., and Smith, W.A.P. 2020. Self-supervised Outdoor Scene Relighting. Computer Vision -- ECCV 2020, Springer.
Export
BibTeX
@inproceedings{yu_ECCV20,
  TITLE     = {Self-supervised Outdoor Scene Relighting},
  AUTHOR    = {Yu, Ye and Meka, Abhimitra and Elgharib, Mohamed and Seidel, Hans-Peter and Theobalt, Christian and Smith, William A. P.},
  LANGUAGE  = {eng},
  ISBN      = {978-3-030-58541-9},
  DOI       = {10.1007/978-3-030-58542-6_6},
  PUBLISHER = {Springer},
  YEAR      = {2020},
  DATE      = {2020},
  BOOKTITLE = {Computer Vision -- ECCV 2020},
  EDITOR    = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael},
  PAGES     = {84--101},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {12367},
  ADDRESS   = {Glasgow, UK},
}
Endnote
%0 Conference Proceedings %A Yu, Ye %A Meka, Abhimitra %A Elgharib, Mohamed %A Seidel, Hans-Peter %A Theobalt, Christian %A Smith, William A. P. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Self-supervised Outdoor Scene Relighting : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0F6-C %R 10.1007/978-3-030-58542-6_6 %D 2020 %B 16th European Conference on Computer Vision %Z date of event: 2020-08-23 - 2020-08-28 %C Glasgow, UK %B Computer Vision -- ECCV 2020 %E Vedaldi, Andrea; Bischof, Horst; Brox, Thomas; Frahm, Jan-Michael %P 84 - 101 %I Springer %@ 978-3-030-58541-9 %B Lecture Notes in Computer Science %N 12367
Yenamandra, T., Tewari, A., Bernard, F., et al. 2020. i3DMM: Deep Implicit 3D Morphable Model of Human Heads. https://arxiv.org/abs/2011.14143.
(arXiv: 2011.14143)
Abstract
We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available.
Export
BibTeX
@online{Yenamandra_arXiv2011.14143,
  TITLE      = {i{3D}MM: Deep Implicit {3D} Morphable Model of Human Heads},
  AUTHOR     = {Yenamandra, Tarun and Tewari, Ayush and Bernard, Florian and Seidel, Hans-Peter and Elgharib, Mohamed and Cremers, Daniel and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {https://arxiv.org/abs/2011.14143},
  EPRINT     = {2011.14143},
  EPRINTTYPE = {arXiv},
  YEAR       = {2020},
  ABSTRACT   = {We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available.},
}
Endnote
%0 Report %A Yenamandra, Tarun %A Tewari, Ayush %A Bernard, Florian %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Cremers, Daniel %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T i3DMM: Deep Implicit 3D Morphable Model of Human Heads : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B702-8 %U https://arxiv.org/abs/2011.14143 %D 2020 %X We present the first deep implicit 3D morphable model (i3DMM) of full heads.<br>Unlike earlier morphable face models it not only captures identity-specific<br>geometry, texture, and expressions of the frontal face, but also models the<br>entire head, including hair. We collect a new dataset consisting of 64 people<br>with different expressions and hairstyles to train i3DMM. Our approach has the<br>following favorable properties: (i) It is the first full head morphable model<br>that includes hair. (ii) In contrast to mesh-based models it can be trained on<br>merely rigidly aligned scans, without requiring difficult non-rigid<br>registration. (iii) We design a novel architecture to decouple the shape model<br>into an implicit reference shape and a deformation of this reference shape.<br>With that, dense correspondences between shapes can be learned implicitly. (iv)<br>This architecture allows us to semantically disentangle the geometry and color<br>components, as color is learned in the reference space. Geometry is further<br>disentangled as identity, expressions, and hairstyle, while color is<br>disentangled as identity and hairstyle components. We show the merits of i3DMM<br>using ablation studies, comparisons to state-of-the-art models, and<br>applications such as semantic head editing and texture transfer. 
We will make<br>our model publicly available.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Tewari, A., Elgharib, M., Bharaj, G., et al. 2020a. StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020), IEEE.
Export
BibTeX
@inproceedings{Tewari_CVPR2020,
  TITLE     = {{StyleRig}: {R}igging {StyleGAN} for {3D} Control Over Portrait Images},
  AUTHOR    = {Tewari, Ayush and Elgharib, Mohamed and Bharaj, Gaurav and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-1-7281-7168-5},
  DOI       = {10.1109/CVPR42600.2020.00618},
  PUBLISHER = {IEEE},
  YEAR      = {2020},
  BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020)},
  PAGES     = {6141--6150},
  ADDRESS   = {Seattle, WA, USA (Virtual)},
}
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Elgharib, Mohamed %A Bharaj, Gaurav %A Bernard, Florian %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0E7-D %R 10.1109/CVPR42600.2020.00618 %D 2020 %B 33rd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2020-06-14 - 2020-06-19 %C Seattle, WA, USA (Virtual) %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 6141 - 6150 %I IEEE %@ 978-1-7281-7168-5
Tewari, A., Elgharib, M., Bharaj, G., et al. 2020b. StyleRig: Rigging StyleGAN for 3D Control over Portrait Images. https://arxiv.org/abs/2004.00121.
(arXiv: 2004.00121)
Abstract
StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face.
Export
BibTeX
@online{Tewari_2004.00121,
  TITLE      = {{StyleRig}: Rigging {StyleGAN} for {3D} Control over Portrait Images},
  AUTHOR     = {Tewari, Ayush and Elgharib, Mohamed and Bharaj, Gaurav and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {https://arxiv.org/abs/2004.00121},
  EPRINT     = {2004.00121},
  EPRINTTYPE = {arXiv},
  YEAR       = {2020},
  ABSTRACT   = {StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face.},
}
Endnote
%0 Report %A Tewari, Ayush %A Elgharib, Mohamed %A Bharaj, Gaurav %A Bernard, Florian %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T StyleRig: Rigging StyleGAN for 3D Control over Portrait Images : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0FC-6 %U https://arxiv.org/abs/2004.00121 %D 2020 %X StyleGAN generates photorealistic portrait images of faces with eyes, teeth,<br>hair and context (neck, shoulders, background), but lacks a rig-like control<br>over semantic face parameters that are interpretable in 3D, such as face pose,<br>expressions, and scene illumination. Three-dimensional morphable face models<br>(3DMMs) on the other hand offer control over the semantic parameters, but lack<br>photorealism when rendered and only model the face interior, not other parts of<br>a portrait image (hair, mouth interior, background). We present the first<br>method to provide a face rig-like control over a pretrained and fixed StyleGAN<br>via a 3DMM. A new rigging network, RigNet is trained between the 3DMM's<br>semantic parameters and StyleGAN's input. The network is trained in a<br>self-supervised manner, without the need for manual annotations. At test time,<br>our method generates portrait images with the photorealism of StyleGAN and<br>provides explicit control over the 3D semantic parameters of the face.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020c. PIE: Portrait Image Embedding for Semantic Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Tewari_ToG2020,
  TITLE     = {{PIE}: {P}ortrait Image Embedding for Semantic Control},
  AUTHOR    = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/3414685.3417803},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2020},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {39},
  NUMBER    = {6},
  EID       = {223},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020},
  EDITOR    = {Myszkowski, Karol},
}
Endnote
%0 Journal Article %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B0C-E %R 10.1145/3414685.3417803 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 223 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020d. PIE: Portrait Image Embedding for Semantic Control. https://arxiv.org/abs/2009.09485.
(arXiv: 2009.09485)
Abstract
Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study.
Export
BibTeX
@online{Tewari_2009.09485,
  TITLE      = {{PIE}: {P}ortrait Image Embedding for Semantic Control},
  AUTHOR     = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {https://arxiv.org/abs/2009.09485},
  EPRINT     = {2009.09485},
  EPRINTTYPE = {arXiv},
  YEAR       = {2020},
  ABSTRACT   = {Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study.},
}
Endnote
%0 Report %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B117-7 %U https://arxiv.org/abs/2009.09485 %D 2020 %X Editing of portrait images is a very popular and important research topic<br>with a large variety of applications. For ease of use, control should be<br>provided via a semantically meaningful parameterization that is akin to<br>computer animation controls. The vast majority of existing techniques do not<br>provide such intuitive and fine-grained control, or only enable coarse editing<br>of a single isolated control parameter. Very recently, high-quality<br>semantically controlled editing has been demonstrated, however only on<br>synthetically created StyleGAN images. We present the first approach for<br>embedding real portrait images in the latent space of StyleGAN, which allows<br>for intuitive editing of the head pose, facial expression, and scene<br>illumination in the image. Semantic editing in parameter space is achieved<br>based on StyleRig, a pretrained neural network that maps the control space of a<br>3D morphable face model to the latent space of the GAN. We design a novel<br>hierarchical non-linear optimization problem to obtain the embedding. An<br>identity preservation energy term allows spatially coherent edits while<br>maintaining facial integrity. 
Our approach runs at interactive frame rates and<br>thus allows the user to explore the space of possible edits. We evaluate our<br>approach on a wide set of portrait photos, compare it to the current state of<br>the art, and validate the effectiveness of its components in an ablation study.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Stadlbauer, P., Mlakar, D., Seidel, H.-P., Steinberger, M., and Zayer, R. 2020. Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing. Computer Graphics Forum (Proc. EUROGRAPHICS 2020)39, 2.
Export
BibTeX
@article{Stadlbauer_EG2020,
  TITLE     = {Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing},
  AUTHOR    = {Stadlbauer, Pascal and Mlakar, Daniel and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.13929},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2020},
  DATE      = {2020},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {39},
  NUMBER    = {2},
  PAGES     = {277--289},
  BOOKTITLE = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)},
  EDITOR    = {Panozzo, Daniele and Assarsson, Ulf},
}
Endnote
%0 Journal Article %A Stadlbauer, Pascal %A Mlakar, Daniel %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB8A-8 %R 10.1111/cgf.13929 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 277 %P 277 - 289 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrk&#246;ping, Sweden, May 25 &#8211; 29, 2020
Shahmirzadi, A.A., Babaei, V., and Seidel, H.-P. 2020. A Multispectral Dataset of Oil and Watercolor Paints. Electronic Imaging32.
Export
BibTeX
@article{shahmirzadi2020multispectral,
  TITLE     = {A Multispectral Dataset of Oil and Watercolor Paints},
  AUTHOR    = {Shahmirzadi, Azadeh Asadi and Babaei, Vahid and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.2352/ISSN.2470-1173.2020.5.MAAP-107},
  PUBLISHER = {IS\&T --- Society for Imaging Science and Technology},
  ADDRESS   = {Springfield, VA},
  YEAR      = {2020},
  JOURNAL   = {Electronic Imaging},
  VOLUME    = {32},
  PAGES     = {1--4},
  EID       = {107},
  BOOKTITLE = {Proceedings of the Material Appearance 2020},
  EDITOR    = {H{\'e}bert, Mathieu and Simonot, Lionel and Tastl, Ingeborg},
}
Endnote
%0 Journal Article %A Shahmirzadi, Azadeh Asadi %A Babaei, Vahid %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Multispectral Dataset of Oil and Watercolor Paints : %G eng %U http://hdl.handle.net/21.11116/0000-0007-F064-9 %R 10.2352/ISSN.2470-1173.2020.5.MAAP-107 %7 2020 %D 2020 %J Electronic Imaging %V 32 %& 1 %P 1 - 4 %Z sequence number: 107 %I IS&T %C Springfield, VA %B Proceedings of the Material Appearance 2020 %O Burlingame, CA, USA, January 26-30, 2020 %I Society for Imaging Science and Technology %C Springfield
Saberpour, A., Hersch, R.D., Fang, J., Zayer, R., Seidel, H.-P., and Babaei, V. 2020. Fabrication of Moiré on Curved Surfaces. Optics Express28, 13.
Export
BibTeX
@article{Saberpour2020,
  TITLE     = {Fabrication of Moir{\'e} on Curved Surfaces},
  AUTHOR    = {Saberpour, Artin and Hersch, Roger D. and Fang, Jiajing and Zayer, Rhaleb and Seidel, Hans-Peter and Babaei, Vahid},
  LANGUAGE  = {eng},
  ISSN      = {1094-4087},
  DOI       = {10.1364/OE.393843},
  PUBLISHER = {Optical Society of America},
  ADDRESS   = {Washington, DC},
  YEAR      = {2020},
  DATE      = {2020},
  JOURNAL   = {Optics Express},
  VOLUME    = {28},
  NUMBER    = {13},
  PAGES     = {19413--19427},
}
Endnote
%0 Journal Article %A Saberpour, Artin %A Hersch, Roger D. %A Fang, Jiajing %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fabrication of Moir&#233; on Curved Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0006-D39D-B %R 10.1364/OE.393843 %7 2020 %D 2020 %J Optics Express %O Opt. Express %V 28 %N 13 %& 19413 %P 19413 - 19427 %I Optical Society of America %C Washington, DC %@ false
Mlakar, D., Winter, M., Stadlbauer, P., Seidel, H.-P., Steinberger, M., and Zayer, R. 2020. Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the GPU. Computer Graphics Forum (Proc. EUROGRAPHICS 2020)39, 2.
Export
BibTeX
@article{Mlakar_EG2020,
  title     = {Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the {GPU}},
  author    = {Mlakar, Daniel and Winter, M. and Stadlbauer, Pascal and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.13934},
  publisher = {Blackwell-Wiley},
  address   = {Oxford},
  year      = {2020},
  date      = {2020},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {39},
  number    = {2},
  pages     = {335--349},
  booktitle = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)},
  editor    = {Panozzo, Daniele and Assarsson, Ulf},
}
Endnote
%0 Journal Article %A Mlakar, Daniel %A Winter, M. %A Stadlbauer, Pascal %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB80-2 %R 10.1111/cgf.13934 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 335 %P 335 - 349 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrk&#246;ping, Sweden, May 25 &#8211; 29, 2020
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2020a. XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020) 39, 4.
Export
BibTeX
@article{Mehta_TOG2020, TITLE = {{XNect}: {R}eal-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392410}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, DATE = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, PAGES = {1--17}, EID = {82}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, internal-note = {review: added DATE (file convention) and PAGES, taken from the duplicate record DBLP:journals/tog/MehtaS0XEFSRPT20 with the same DOI; consider merging the two entries}, }
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0007-832D-3 %R 10.1145/3386569.3392410 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 82 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2020b. XNect: Real-time Multi-person 3D Motion Capture with a Single RGB Camera. ACM Transactions on Graphics 39, 4.
Export
BibTeX
@article{DBLP:journals/tog/MehtaS0XEFSRPT20,
  title     = {{XNect}: Real-time Multi-person {3D} Motion Capture with a Single {RGB} Camera},
  author    = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3386569.3392410},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2020},
  date      = {2020},
  journal   = {ACM Transactions on Graphics},
  volume    = {39},
  number    = {4},
  pages     = {1--17},
  eid       = {82},
}
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Motion Capture with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-000F-796D-0 %R 10.1145/3386569.3392410 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %& 1 %P 1 - 17 %Z sequence number: 82 %I Association for Computing Machinery %C New York, NY %@ false
Mallikarjun B R, Tewari, A., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2020a. Learning Complete 3D Morphable Face Models from Images and Videos. https://arxiv.org/abs/2010.01679.
(arXiv: 2010.01679)
Abstract
Most 3D face reconstruction methods rely on 3D morphable models, which<br>disentangle the space of facial deformations into identity geometry,<br>expressions and skin reflectance. These models are typically learned from a<br>limited number of 3D scans and thus do not generalize well across different<br>identities and expressions. We present the first approach to learn complete 3D<br>models of face identity geometry, albedo and expression just from images and<br>videos. The virtually endless collection of such data, in combination with our<br>self-supervised learning-based approach allows for learning face models that<br>generalize beyond the span of existing approaches. Our network design and loss<br>functions ensure a disentangled parameterization of not only identity and<br>albedo, but also, for the first time, an expression basis. Our method also<br>allows for in-the-wild monocular reconstruction at test time. We show that our<br>learned models better generalize and lead to higher quality image-based<br>reconstructions than existing approaches.<br>
Export
BibTeX
@online{Mallikarjun_arXiv2010.01679, TITLE = {Learning Complete {3D} Morphable Face Models from Images and Videos}, AUTHOR = {{Mallikarjun B R} and Tewari, Ayush and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2010.01679}, EPRINT = {2010.01679}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models better generalize and lead to higher quality image-based reconstructions than existing approaches.}, internal-note = {review: braced mononym-style author name so BibTeX does not split it into first/last; stripped literal <br> HTML export junk from ABSTRACT}, }
Endnote
%0 Report %A Mallikarjun B R, %A Tewari, Ayush %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Complete 3D Morphable Face Models from Images and Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6FB-1 %U https://arxiv.org/abs/2010.01679 %D 2020 %X Most 3D face reconstruction methods rely on 3D morphable models, which<br>disentangle the space of facial deformations into identity geometry,<br>expressions and skin reflectance. These models are typically learned from a<br>limited number of 3D scans and thus do not generalize well across different<br>identities and expressions. We present the first approach to learn complete 3D<br>models of face identity geometry, albedo and expression just from images and<br>videos. The virtually endless collection of such data, in combination with our<br>self-supervised learning-based approach allows for learning face models that<br>generalize beyond the span of existing approaches. Our network design and loss<br>functions ensure a disentangled parameterization of not only identity and<br>albedo, but also, for the first time, an expression basis. Our method also<br>allows for in-the-wild monocular reconstruction at test time. We show that our<br>learned models better generalize and lead to higher quality image-based<br>reconstructions than existing approaches.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG,Computer Science, Multimedia, cs.MM
Mallikarjun B R, Tewari, A., Oh, T.-H., et al. 2020b. Monocular Reconstruction of Neural Face Reflectance Fields. https://arxiv.org/abs/2008.10247.
(arXiv: 2008.10247)
Abstract
The reflectance field of a face describes the reflectance properties<br>responsible for complex lighting effects including diffuse, specular,<br>inter-reflection and self shadowing. Most existing methods for estimating the<br>face reflectance from a monocular image assume faces to be diffuse with very<br>few approaches adding a specular component. This still leaves out important<br>perceptual aspects of reflectance as higher-order global illumination effects<br>and self-shadowing are not modeled. We present a new neural representation for<br>face reflectance where we can estimate all components of the reflectance<br>responsible for the final appearance from a single monocular image. Instead of<br>modeling each component of the reflectance separately using parametric models,<br>our neural representation allows us to generate a basis set of faces in a<br>geometric deformation-invariant space, parameterized by the input light<br>direction, viewpoint and face geometry. We learn to reconstruct this<br>reflectance field of a face just from a monocular image, which can be used to<br>render the face from any viewpoint in any light condition. Our method is<br>trained on a light-stage training dataset, which captures 300 people<br>illuminated with 150 light conditions from 8 viewpoints. We show that our<br>method outperforms existing monocular reflectance reconstruction methods, in<br>terms of photorealism due to better capturing of physical premitives, such as<br>sub-surface scattering, specularities, self-shadows and other higher-order<br>effects.<br>
Export
BibTeX
@online{Mallikarjun_2008.10247, TITLE = {Monocular Reconstruction of Neural Face Reflectance Fields}, AUTHOR = {{Mallikarjun B R} and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2008.10247}, EPRINT = {2008.10247}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods, in terms of photorealism due to better capturing of physical premitives, such as sub-surface scattering, specularities, self-shadows and other higher-order effects.}, internal-note = {review: braced mononym-style author name so BibTeX does not split it into first/last; stripped literal <br> HTML export junk from ABSTRACT; "premitives" typo kept as in the source abstract}, }
Endnote
%0 Report %A Mallikarjun B R, %A Tewari, Ayush %A Oh, Tae-Hyun %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Monocular Reconstruction of Neural Face Reflectance Fields : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B110-E %U https://arxiv.org/abs/2008.10247 %D 2020 %X The reflectance field of a face describes the reflectance properties<br>responsible for complex lighting effects including diffuse, specular,<br>inter-reflection and self shadowing. Most existing methods for estimating the<br>face reflectance from a monocular image assume faces to be diffuse with very<br>few approaches adding a specular component. This still leaves out important<br>perceptual aspects of reflectance as higher-order global illumination effects<br>and self-shadowing are not modeled. We present a new neural representation for<br>face reflectance where we can estimate all components of the reflectance<br>responsible for the final appearance from a single monocular image. Instead of<br>modeling each component of the reflectance separately using parametric models,<br>our neural representation allows us to generate a basis set of faces in a<br>geometric deformation-invariant space, parameterized by the input light<br>direction, viewpoint and face geometry. We learn to reconstruct this<br>reflectance field of a face just from a monocular image, which can be used to<br>render the face from any viewpoint in any light condition. 
Our method is<br>trained on a light-stage training dataset, which captures 300 people<br>illuminated with 150 light conditions from 8 viewpoints. We show that our<br>method outperforms existing monocular reflectance reconstruction methods, in<br>terms of photorealism due to better capturing of physical premitives, such as<br>sub-surface scattering, specularities, self-shadows and other higher-order<br>effects.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Kappel, M., Golyanik, V., Elgharib, M., et al. 2020. High-Fidelity Neural Human Motion Transfer from Monocular Video. https://arxiv.org/abs/2012.10974.
(arXiv: 2012.10974)
Abstract
Video-based human motion transfer creates video animations of humans<br>following a source motion. Current methods show remarkable results for<br>tightly-clad subjects. However, the lack of temporally consistent handling of<br>plausible clothing dynamics, including fine and high-frequency details,<br>significantly limits the attainable visual quality. We address these<br>limitations for the first time in the literature and present a new framework<br>which performs high-fidelity and temporally-consistent human motion transfer<br>with natural pose-dependent non-rigid deformations, for several types of loose<br>garments. In contrast to the previous techniques, we perform image generation<br>in three subsequent stages, synthesizing human shape, structure, and<br>appearance. Given a monocular RGB video of an actor, we train a stack of<br>recurrent deep neural networks that generate these intermediate representations<br>from 2D poses and their temporal derivatives. Splitting the difficult motion<br>transfer problem into subtasks that are aware of the temporal motion context<br>helps us to synthesize results with plausible dynamics and pose-dependent<br>detail. It also allows artistic control of results by manipulation of<br>individual framework stages. In the experimental results, we significantly<br>outperform the state-of-the-art in terms of video realism. Our code and data<br>will be made publicly available.<br>
Export
BibTeX
@online{Kappel_arXiv2012.10974, TITLE = {High-Fidelity Neural Human Motion Transfer from Monocular Video}, AUTHOR = {Kappel, Moritz and Golyanik, Vladislav and Elgharib, Mohamed and Henningson, Jann-Ole and Seidel, Hans-Peter and Castillo, Susana and Theobalt, Christian and Magnor, Marcus A.}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.10974}, EPRINT = {2012.10974}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly-clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework which performs high-fidelity and temporally-consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to the previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state-of-the-art in terms of video realism. Our code and data will be made publicly available.}, internal-note = {review: stripped literal <br> HTML export junk from ABSTRACT}, }
Endnote
%0 Report %A Kappel, Moritz %A Golyanik, Vladislav %A Elgharib, Mohamed %A Henningson, Jann-Ole %A Seidel, Hans-Peter %A Castillo, Susana %A Theobalt, Christian %A Magnor, Marcus A. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T High-Fidelity Neural Human Motion Transfer from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B715-3 %U https://arxiv.org/abs/2012.10974 %D 2020 %X Video-based human motion transfer creates video animations of humans<br>following a source motion. Current methods show remarkable results for<br>tightly-clad subjects. However, the lack of temporally consistent handling of<br>plausible clothing dynamics, including fine and high-frequency details,<br>significantly limits the attainable visual quality. We address these<br>limitations for the first time in the literature and present a new framework<br>which performs high-fidelity and temporally-consistent human motion transfer<br>with natural pose-dependent non-rigid deformations, for several types of loose<br>garments. In contrast to the previous techniques, we perform image generation<br>in three subsequent stages, synthesizing human shape, structure, and<br>appearance. Given a monocular RGB video of an actor, we train a stack of<br>recurrent deep neural networks that generate these intermediate representations<br>from 2D poses and their temporal derivatives. Splitting the difficult motion<br>transfer problem into subtasks that are aware of the temporal motion context<br>helps us to synthesize results with plausible dynamics and pose-dependent<br>detail. It also allows artistic control of results by manipulation of<br>individual framework stages. 
In the experimental results, we significantly<br>outperform the state-of-the-art in terms of video realism. Our code and data<br>will be made publicly available.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Fox, G., Liu, W., Kim, H., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2020. VideoForensicsHQ: Detecting High-quality Manipulated Face Videos. https://arxiv.org/abs/2005.10360.
(arXiv: 2005.10360)
Abstract
New approaches to synthesize and manipulate face videos at very high quality<br>have paved the way for new applications in computer animation, virtual and<br>augmented reality, or face video analysis. However, there are concerns that<br>they may be used in a malicious way, e.g. to manipulate videos of public<br>figures, politicians or reporters, to spread false information. The research<br>community therefore developed techniques for automated detection of modified<br>imagery, and assembled benchmark datasets showing manipulatons by<br>state-of-the-art techniques. In this paper, we contribute to this initiative in<br>two ways: First, we present a new audio-visual benchmark dataset. It shows some<br>of the highest quality visual manipulations available today. Human observers<br>find them significantly harder to identify as forged than videos from other<br>benchmarks. Furthermore we propose new family of deep-learning-based fake<br>detectors, demonstrating that existing detectors are not well-suited for<br>detecting fakes of a quality as high as presented in our dataset. Our detectors<br>examine spatial and temporal features. This allows them to outperform existing<br>approaches both in terms of high detection accuracy and generalization to<br>unseen fake generation methods and unseen identities.<br>
Export
BibTeX
@online{Fox_2005.10360, TITLE = {{VideoForensicsHQ}: {D}etecting High-quality Manipulated Face Videos}, AUTHOR = {Fox, Gereon and Liu, Wentao and Kim, Hyeongwoo and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2005.10360}, EPRINT = {2005.10360}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, or face video analysis. However, there are concerns that they may be used in a malicious way, e.g. to manipulate videos of public figures, politicians or reporters, to spread false information. The research community therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulatons by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore we propose new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well-suited for detecting fakes of a quality as high as presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches both in terms of high detection accuracy and generalization to unseen fake generation methods and unseen identities.}, internal-note = {review: removed hard-coded discretionary hyphens (\-) from TITLE -- presentation markup does not belong in a database field; stripped literal <br> HTML export junk from ABSTRACT; "manipulatons" typo kept as in the source abstract}, }
Endnote
%0 Report %A Fox, Gereon %A Liu, Wentao %A Kim, Hyeongwoo %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T VideoForensicsHQ: Detecting High-quality Manipulated Face Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B109-7 %U https://arxiv.org/abs/2005.10360 %D 2020 %X New approaches to synthesize and manipulate face videos at very high quality<br>have paved the way for new applications in computer animation, virtual and<br>augmented reality, or face video analysis. However, there are concerns that<br>they may be used in a malicious way, e.g. to manipulate videos of public<br>figures, politicians or reporters, to spread false information. The research<br>community therefore developed techniques for automated detection of modified<br>imagery, and assembled benchmark datasets showing manipulatons by<br>state-of-the-art techniques. In this paper, we contribute to this initiative in<br>two ways: First, we present a new audio-visual benchmark dataset. It shows some<br>of the highest quality visual manipulations available today. Human observers<br>find them significantly harder to identify as forged than videos from other<br>benchmarks. Furthermore we propose new family of deep-learning-based fake<br>detectors, demonstrating that existing detectors are not well-suited for<br>detecting fakes of a quality as high as presented in our dataset. Our detectors<br>examine spatial and temporal features. 
This allows them to outperform existing<br>approaches both in terms of high detection accuracy and generalization to<br>unseen fake generation methods and unseen identities.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Elgharib, M., Mendiratta, M., Thies, J., et al. 2020. Egocentric Videoconferencing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Elgharib_ToG2020, TITLE = {Egocentric Videoconferencing}, AUTHOR = {Elgharib, Mohamed and Mendiratta, Mohit and Thies, Justus and Nie{\ss}ner, Matthias and Seidel, Hans-Peter and Tewari, Ayush and Golyanik, Vladislav and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417808}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, DATE = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {268}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Elgharib, Mohamed %A Mendiratta, Mohit %A Thies, Justus %A Nie&#223;ner, Matthias %A Seidel, Hans-Peter %A Tewari, Ayush %A Golyanik, Vladislav %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Egocentric Videoconferencing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B36-E %R 10.1145/3414685.3417808 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 268 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Cucerca, S., Didyk, P., Seidel, H.-P., and Babaei, V. 2020. Computational Image Marking on Metals via Laser Induced Heating. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020) 39, 4.
Export
BibTeX
@article{Cucerca_SIGGRAPH2020, TITLE = {Computational Image Marking on Metals via Laser Induced Heating}, AUTHOR = {Cucerca, Sebastian and Didyk, Piotr and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392423}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, DATE = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, EID = {70}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, }
Endnote
%0 Journal Article %A Cucerca, Sebastian %A Didyk, Piotr %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computational Image Marking on Metals via Laser Induced Heating : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9664-F %R 10.1145/3386569.3392423 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 70 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020. HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models. https://arxiv.org/abs/2012.12009.
(arXiv: 2012.12009)
Abstract
We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video<br>from a dual-exposure sensor that records different low-dynamic range (LDR)<br>information in different pixel columns: Odd columns provide low-exposure,<br>sharp, but noisy information; even columns complement this with less noisy,<br>high-exposure, but motion-blurred data. Previous LDR work learns to deblur and<br>denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images.<br>Regrettably, capturing DISTORTED sensor readings is time-consuming; as well,<br>there is a lack of CLEAN HDR videos. We suggest a method to overcome those two<br>limitations. First, we learn a different function instead: CLEAN->DISTORTED,<br>which generates samples containing correlated pixel noise, and row and column<br>noise, as well as motion blur from a low number of CLEAN sensor readings.<br>Second, as there is not enough CLEAN HDR video available, we devise a method to<br>learn from LDR video in-stead. Our approach compares favorably to several<br>strong baselines, and can boost existing methods when they are re-trained on<br>our data. Combined with spatial and temporal super-resolution, it enables<br>applications such as re-lighting with low noise or blur.<br>
Export
BibTeX
@online{Cogalan_arXiv2012.12009,
  title      = {{HDR} Denoising and Deblurring by Learning Spatio-temporal Distortion Models},
  author     = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  language   = {eng},
  url        = {https://arxiv.org/abs/2012.12009},
  eprint     = {2012.12009},
  eprinttype = {arXiv},
  year       = {2020},
  abstract   = {We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video<br>from a dual-exposure sensor that records different low-dynamic range (LDR)<br>information in different pixel columns: Odd columns provide low-exposure,<br>sharp, but noisy information; even columns complement this with less noisy,<br>high-exposure, but motion-blurred data. Previous LDR work learns to deblur and<br>denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images.<br>Regrettably, capturing DISTORTED sensor readings is time-consuming; as well,<br>there is a lack of CLEAN HDR videos. We suggest a method to overcome those two<br>limitations. First, we learn a different function instead: CLEAN->DISTORTED,<br>which generates samples containing correlated pixel noise, and row and column<br>noise, as well as motion blur from a low number of CLEAN sensor readings.<br>Second, as there is not enough CLEAN HDR video available, we devise a method to<br>learn from LDR video in-stead. Our approach compares favorably to several<br>strong baselines, and can boost existing methods when they are re-trained on<br>our data. Combined with spatial and temporal super-resolution, it enables<br>applications such as re-lighting with low noise or blur.<br>},
}
Endnote
%0 Report %A &#199;o&#287;alan, U&#287;ur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B721-5 %U https://arxiv.org/abs/2012.12009 %D 2020 %X We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video<br>from a dual-exposure sensor that records different low-dynamic range (LDR)<br>information in different pixel columns: Odd columns provide low-exposure,<br>sharp, but noisy information; even columns complement this with less noisy,<br>high-exposure, but motion-blurred data. Previous LDR work learns to deblur and<br>denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images.<br>Regrettably, capturing DISTORTED sensor readings is time-consuming; as well,<br>there is a lack of CLEAN HDR videos. We suggest a method to overcome those two<br>limitations. First, we learn a different function instead: CLEAN->DISTORTED,<br>which generates samples containing correlated pixel noise, and row and column<br>noise, as well as motion blur from a low number of CLEAN sensor readings.<br>Second, as there is not enough CLEAN HDR video available, we devise a method to<br>learn from LDR video in-stead. Our approach compares favorably to several<br>strong baselines, and can boost existing methods when they are re-trained on<br>our data. Combined with spatial and temporal super-resolution, it enables<br>applications such as re-lighting with low noise or blur.<br> %K eess.IV,Computer Science, Computer Vision and Pattern Recognition, cs.CV
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020a. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Bemana2020,
  title     = {X-{F}ields: {I}mplicit Neural View-, Light- and Time-Image Interpolation},
  author    = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3414685.3417827},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2020},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {39},
  number    = {6},
  eid       = {257},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2020},
  editor    = {Myszkowski, Karol},
}
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBF0-0 %R 10.1145/3414685.3417827 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 257 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020b. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. https://arxiv.org/abs/2010.00450.
(arXiv: 2010.00450)
Abstract
We suggest to represent an X-Field -a set of 2D images taken across different<br>view, time or illumination conditions, i.e., video, light field, reflectance<br>fields or combinations thereof-by learning a neural network (NN) to map their<br>view, time or light coordinates to 2D images. Executing this NN at new<br>coordinates results in joint view, time or light interpolation. The key idea to<br>make this workable is a NN that already knows the "basic tricks" of graphics<br>(lighting, 3D projection, occlusion) in a hard-coded and differentiable form.<br>The NN represents the input to that rendering as an implicit map, that for any<br>view, time, or light coordinate and for any pixel can quantify how it will move<br>if view, time or light coordinates change (Jacobian of pixel position with<br>respect to view, time, illumination, etc.). Our X-Field representation is<br>trained for one scene within minutes, leading to a compact set of trainable<br>parameters and hence real-time navigation in view, time and illumination.<br>
Export
BibTeX
@online{Bemana_arXiv2010.00450,
  title      = {X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation},
  author     = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  language   = {eng},
  url        = {https://arxiv.org/abs/2010.00450},
  eprint     = {2010.00450},
  eprinttype = {arXiv},
  year       = {2020},
  abstract   = {We suggest to represent an X-Field -a set of 2D images taken across different<br>view, time or illumination conditions, i.e., video, light field, reflectance<br>fields or combinations thereof-by learning a neural network (NN) to map their<br>view, time or light coordinates to 2D images. Executing this NN at new<br>coordinates results in joint view, time or light interpolation. The key idea to<br>make this workable is a NN that already knows the "basic tricks" of graphics<br>(lighting, 3D projection, occlusion) in a hard-coded and differentiable form.<br>The NN represents the input to that rendering as an implicit map, that for any<br>view, time, or light coordinate and for any pixel can quantify how it will move<br>if view, time or light coordinates change (Jacobian of pixel position with<br>respect to view, time, illumination, etc.). Our X-Field representation is<br>trained for one scene within minutes, leading to a compact set of trainable<br>parameters and hence real-time navigation in view, time and illumination.<br>},
}
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6EC-2 %U https://arxiv.org/abs/2010.00450 %D 2020 %X We suggest to represent an X-Field -a set of 2D images taken across different<br>view, time or illumination conditions, i.e., video, light field, reflectance<br>fields or combinations thereof-by learning a neural network (NN) to map their<br>view, time or light coordinates to 2D images. Executing this NN at new<br>coordinates results in joint view, time or light interpolation. The key idea to<br>make this workable is a NN that already knows the "basic tricks" of graphics<br>(lighting, 3D projection, occlusion) in a hard-coded and differentiable form.<br>The NN represents the input to that rendering as an implicit map, that for any<br>view, time, or light coordinate and for any pixel can quantify how it will move<br>if view, time or light coordinates change (Jacobian of pixel position with<br>respect to view, time, illumination, etc.). Our X-Field representation is<br>trained for one scene within minutes, leading to a compact set of trainable<br>parameters and hence real-time navigation in view, time and illumination.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Ansari, N., Alizadeh-Mousavi, O., Seidel, H.-P., and Babaei, V. 2020. Mixed Integer Ink Selection for Spectral Reproduction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Ansari_ToG2020,
  title     = {Mixed Integer Ink Selection for Spectral Reproduction},
  author    = {Ansari, Navid and Alizadeh-Mousavi, Omid and Seidel, Hans-Peter and Babaei, Vahid},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3414685.3417761},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2020},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {39},
  number    = {6},
  eid       = {255},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2020},
  editor    = {Myszkowski, Karol},
}
Endnote
%0 Journal Article %A Ansari, Navid %A Alizadeh-Mousavi, Omid %A Seidel, Hans-Peter %A Babaei, Vahid %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Ink Selection for Spectral Reproduction : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B23-3 %R 10.1145/3414685.3417761 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 255 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
2019
Yu, H., Bemana, M., Wernikowski, M., et al. 2019. A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019)25, 5.
Export
BibTeX
@article{Yu_VR2019,
  title     = {A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays},
  author    = {Yu, Hyeonseung and Bemana, Mojtaba and Wernikowski, Marek and Chwesiuk, Micha{\l} and Tursun, Okan Tarhan and Singh, Gurprit and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Didyk, Piotr},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2019.2898821},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2019},
  date      = {2019},
  journal   = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)},
  volume    = {25},
  number    = {5},
  pages     = {1940--1950},
  booktitle = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)},
  editor    = {Thomas, Bruce and Welch, Greg and Kuhlen, Torsten and Johnson, Kyle},
}
Endnote
%0 Journal Article %A Yu, Hyeonseung %A Bemana, Mojtaba %A Wernikowski, Marek %A Chwesiuk, Micha&#322; %A Tursun, Okan Tarhan %A Singh, Gurprit %A Myszkowski, Karol %A Mantiuk, Rados&#322;aw %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays : %G eng %U http://hdl.handle.net/21.11116/0000-0002-DCB5-A %R 10.1109/TVCG.2019.2898821 %7 2019 %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %V 25 %N 5 %& 1940 %P 1940 - 1950 %I IEEE Computer Society %C New York, NY %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, 23rd - 27th March
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2019. Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019)25, 5.
Export
BibTeX
@article{Xu2019Mo2Cap2,
  title     = {{Mo2Cap2}: Real-time Mobile {3D} Motion Capture with a Cap-mounted Fisheye Camera},
  author    = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Fua, Pascal and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2019.2898650},
  publisher = {IEEE},
  address   = {Piscataway, NJ},
  year      = {2019},
  date      = {2019},
  journal   = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)},
  volume    = {25},
  number    = {5},
  pages     = {2093--2101},
  booktitle = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)},
}
Endnote
%0 Journal Article %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Fua, Pascal %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F1DB-7 %R 10.1109/TVCG.2019.2898650 %7 2019 %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %V 25 %N 5 %& 2093 %P 2093 - 2101 %I IEEE %C Piscataway, NJ %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, March 23rd - 27th
Winter, M., Mlakar, D., Zayer, R., Seidel, H.-P., and Steinberger, M. 2019. Adaptive Sparse Matrix-Matrix Multiplication on the GPU. PPoPP’19, 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, ACM.
Export
BibTeX
@inproceedings{PPOPP:2019:ASPMM,
  title     = {Adaptive Sparse Matrix-Matrix Multiplication on the {GPU}},
  author    = {Winter, Martin and Mlakar, Daniel and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus},
  language  = {eng},
  isbn      = {978-1-4503-6225-2},
  doi       = {10.1145/3293883.3295701},
  publisher = {ACM},
  year      = {2019},
  date      = {2019},
  booktitle = {PPoPP'19, 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming},
  pages     = {68--81},
  address   = {Washington, DC, USA},
}
Endnote
%0 Conference Proceedings %A Winter, Martin %A Mlakar, Daniel %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Adaptive Sparse Matrix-Matrix Multiplication on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-EFE9-B %R 10.1145/3293883.3295701 %D 2019 %B 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming %Z date of event: 2019-02-16 - 2019-02-20 %C Washington, DC, USA %B PPoPP'19 %P 68 - 81 %I ACM %@ 978-1-4503-6225-2
Tursun, O.T., Arabadzhiyska, E., Wernikowski, M., et al. 2019. Luminance-Contrast-Aware Foveated Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2019)38, 4.
Export
BibTeX
@article{Tursun2019Luminance,
  title     = {Luminance-Contrast-Aware Foveated Rendering},
  author    = {Tursun, Okan Tarhan and Arabadzhiyska, Elena and Wernikowski, Marek and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Myszkowski, Karol and Didyk, Piotr},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3306346.3322985},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2019},
  date      = {2019},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {38},
  number    = {4},
  eid       = {98},
  booktitle = {Proceedings of ACM SIGGRAPH 2019},
}
Endnote
%0 Journal Article %A Tursun, Okan Tarhan %A Arabadzhiyska, Elena %A Wernikowski, Marek %A Mantiuk, Rados&#322;aw %A Seidel, Hans-Peter %A Myszkowski, Karol %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Luminance-Contrast-Aware Foveated Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0003-75D5-9 %R 10.1145/3306346.3322985 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 4 %Z sequence number: 98 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2019 %O ACM SIGGRAPH 2019 Los Angeles, CA, USA, 28 July - 1 August
Tewari, A., Bernard, F., Garrido, P., et al. 2019. FML: Face Model Learning From Videos. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019), IEEE.
Export
BibTeX
@inproceedings{TewariCVPR2019,
  title     = {{FML}: {F}ace Model Learning From Videos},
  author    = {Tewari, Ayush and Bernard, Florian and Garrido, Pablo and Bharaj, Gaurav and Elgharib, Mohamed and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-7281-3293-8},
  doi       = {10.1109/CVPR.2019.01107},
  publisher = {IEEE},
  year      = {2019},
  booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019)},
  pages     = {10812--10822},
  address   = {Long Beach, CA, USA},
}
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Bernard, Florian %A Garrido, Pablo %A Bharaj, Gaurav %A Elgharib, Mohamed %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T FML: Face Model Learning From Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0005-7B0C-5 %R 10.1109/CVPR.2019.01107 %D 2019 %B 32nd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2019-06-16 - 2019-06-20 %C Long Beach, CA, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 10812 - 10822 %I IEEE %@ 978-1-7281-3293-8
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2019a. XNect Demo (v2): Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. CVPR 2019 Demonstrations.
Export
BibTeX
@inproceedings{XNectDemoV2_CVPR2019,
  title     = {{XNect} Demo (v2): {R}eal-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera},
  author    = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Seidel, Hans-Peter and Fua, Pascal and Elgharib, Mohamed and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
  language  = {eng},
  year      = {2019},
  booktitle = {CVPR 2019 Demonstrations},
  address   = {Long Beach, CA, USA},
}
Endnote
%0 Conference Proceedings %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Seidel, Hans-Peter %A Fua, Pascal %A Elgharib, Mohamed %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect Demo (v2): Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0004-71DB-6 %D 2019 %B 32nd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2019-06-16 - 2019-06-20 %C Long Beach, CA, USA %B CVPR 2019 Demonstrations %U http://gvv.mpi-inf.mpg.de/projects/XNectDemoV2/http://gvv.mpi-inf.mpg.de/projects/XNectDemoV2/
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2019b. XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. http://arxiv.org/abs/1907.00837.
(arXiv: 1907.00837)
Abstract
We present a real-time approach for multi-person 3D motion capture at over 30<br>fps using a single RGB camera. It operates in generic scenes and is robust to<br>difficult occlusions both by other people and objects. Our method operates in<br>subsequent stages. The first stage is a convolutional neural network (CNN) that<br>estimates 2D and 3D pose features along with identity assignments for all<br>visible joints of all individuals. We contribute a new architecture for this<br>CNN, called SelecSLS Net, that uses novel selective long and short range skip<br>connections to improve the information flow allowing for a drastically faster<br>network without compromising accuracy. In the second stage, a fully-connected<br>neural network turns the possibly partial (on account of occlusion) 2D pose and<br>3D pose features for each subject into a complete 3D pose estimate per<br>individual. The third stage applies space-time skeletal model fitting to the<br>predicted 2D and 3D pose per subject to further reconcile the 2D and 3D pose,<br>and enforce temporal coherence. Our method returns the full skeletal pose in<br>joint angles for each subject. This is a further key distinction from previous<br>work that neither extracted global body positions nor joint angle results of a<br>coherent skeleton in real time for multi-person scenes. The proposed system<br>runs on consumer hardware at a previously unseen speed of more than 30 fps<br>given 512x320 images as input while achieving state-of-the-art accuracy, which<br>we will demonstrate on a range of challenging real-world scenes.<br>
Export
BibTeX
@online{Mehta_arXiv1907.00837,
  title      = {{XNect}: Real-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera},
  author     = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
  language   = {eng},
  url        = {http://arxiv.org/abs/1907.00837},
  eprint     = {1907.00837},
  eprinttype = {arXiv},
  year       = {2019},
  abstract   = {We present a real-time approach for multi-person 3D motion capture at over 30<br>fps using a single RGB camera. It operates in generic scenes and is robust to<br>difficult occlusions both by other people and objects. Our method operates in<br>subsequent stages. The first stage is a convolutional neural network (CNN) that<br>estimates 2D and 3D pose features along with identity assignments for all<br>visible joints of all individuals. We contribute a new architecture for this<br>CNN, called SelecSLS Net, that uses novel selective long and short range skip<br>connections to improve the information flow allowing for a drastically faster<br>network without compromising accuracy. In the second stage, a fully-connected<br>neural network turns the possibly partial (on account of occlusion) 2D pose and<br>3D pose features for each subject into a complete 3D pose estimate per<br>individual. The third stage applies space-time skeletal model fitting to the<br>predicted 2D and 3D pose per subject to further reconcile the 2D and 3D pose,<br>and enforce temporal coherence. Our method returns the full skeletal pose in<br>joint angles for each subject. This is a further key distinction from previous<br>work that neither extracted global body positions nor joint angle results of a<br>coherent skeleton in real time for multi-person scenes. The proposed system<br>runs on consumer hardware at a previously unseen speed of more than 30 fps<br>given 512x320 images as input while achieving state-of-the-art accuracy, which<br>we will demonstrate on a range of challenging real-world scenes.<br>},
}
Endnote
%0 Report %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0003-FE21-A %U http://arxiv.org/abs/1907.00837 %D 2019 %X We present a real-time approach for multi-person 3D motion capture at over 30<br>fps using a single RGB camera. It operates in generic scenes and is robust to<br>difficult occlusions both by other people and objects. Our method operates in<br>subsequent stages. The first stage is a convolutional neural network (CNN) that<br>estimates 2D and 3D pose features along with identity assignments for all<br>visible joints of all individuals. We contribute a new architecture for this<br>CNN, called SelecSLS Net, that uses novel selective long and short range skip<br>connections to improve the information flow allowing for a drastically faster<br>network without compromising accuracy. In the second stage, a fully-connected<br>neural network turns the possibly partial (on account of occlusion) 2D pose and<br>3D pose features for each subject into a complete 3D pose estimate per<br>individual. 
The third stage applies space-time skeletal model fitting to the<br>predicted 2D and 3D pose per subject to further reconcile the 2D and 3D pose,<br>and enforce temporal coherence. Our method returns the full skeletal pose in<br>joint angles for each subject. This is a further key distinction from previous<br>work that neither extracted global body positions nor joint angle results of a<br>coherent skeleton in real time for multi-person scenes. The proposed system<br>runs on consumer hardware at a previously unseen speed of more than 30 fps<br>given 512x320 images as input while achieving state-of-the-art accuracy, which<br>we will demonstrate on a range of challenging real-world scenes.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2019. Deep Point Correlation Design. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2019)38, 6.
Export
BibTeX
@article{Leimkuehler_SA2019,
  title     = {Deep Point Correlation Design},
  author    = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3355089.3356562},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2019},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {38},
  number    = {6},
  eid       = {226},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2019},
}
Endnote
%0 Journal Article %A Leimk&#252;hler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Point Correlation Design : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BF3-B %R 10.1145/3355089.3356562 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 226 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2019 %O ACM SIGGRAPH Asia 2019 Brisbane, Australia, 17 - 20 November 2019 SA'19 SA 2019
Kim, H., Elgharib, M., Zollhöfer, M., et al. 2019. Neural Style-preserving Visual Dubbing. ACM Transactions on Graphics38, 6.
Export
BibTeX
@article{Kim2019,
  title     = {Neural Style-preserving Visual Dubbing},
  author    = {Kim, Hyeongwoo and Elgharib, Mohamed and Zollh{\"o}fer, Michael and Seidel, Hans-Peter and Beeler, Thabo and Richardt, Christian and Theobalt, Christian},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3355089.3356500},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2019},
  date      = {2019},
  journal   = {ACM Transactions on Graphics},
  volume    = {38},
  number    = {6},
  eid       = {178},
}
Endnote
%0 Journal Article %A Kim, Hyeongwoo %A Elgharib, Mohamed %A Zollh&#246;fer, Michael %A Seidel, Hans-Peter %A Beeler, Thabo %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Style-preserving Visual Dubbing : %G eng %U http://hdl.handle.net/21.11116/0000-0005-6AC0-B %R 10.1145/3355089.3356500 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 178 %I ACM %C New York, NY %@ false
Jiang, C., Tang, C., Seidel, H.-P., Chen, R., and Wonka, P. 2019. Computational Design of Lightweight Trusses. http://arxiv.org/abs/1901.05637.
(arXiv: 1901.05637)
Abstract
Trusses are load-carrying light-weight structures consisting of bars<br>connected at joints ubiquitously applied in a variety of engineering scenarios.<br>Designing optimal trusses that satisfy functional specifications with a minimal<br>amount of material has interested both theoreticians and practitioners for more<br>than a century. In this paper, we introduce two main ideas to improve upon the<br>state of the art. First, we formulate an alternating linear programming problem<br>for geometry optimization. Second, we introduce two sets of complementary<br>topological operations, including a novel subdivision scheme for global<br>topology refinement inspired by Michell's famed theoretical study. Based on<br>these two ideas, we build an efficient computational framework for the design<br>of lightweight trusses. We illustrate our framework with a variety of<br>functional specifications and extensions. We show that our method achieves<br>trusses with smaller volumes and is over two orders of magnitude faster<br>compared with recent state-of-the-art approaches.<br>
Export
BibTeX
@online{Jiang_arXIv1901.05637,
  TITLE       = {Computational Design of Lightweight Trusses},
  AUTHOR      = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Chen, Renjie and Wonka, Peter},
  URL         = {http://arxiv.org/abs/1901.05637},
  EPRINT      = {1901.05637},
  EPRINTTYPE  = {arXiv},
  EPRINTCLASS = {cs.GR},
  YEAR        = {2019},
  ABSTRACT    = {Trusses are load-carrying light-weight structures consisting of bars connected at joints ubiquitously applied in a variety of engineering scenarios. Designing optimal trusses that satisfy functional specifications with a minimal amount of material has interested both theoreticians and practitioners for more than a century. In this paper, we introduce two main ideas to improve upon the state of the art. First, we formulate an alternating linear programming problem for geometry optimization. Second, we introduce two sets of complementary topological operations, including a novel subdivision scheme for global topology refinement inspired by Michell's famed theoretical study. Based on these two ideas, we build an efficient computational framework for the design of lightweight trusses. We illustrate our framework with a variety of functional specifications and extensions. We show that our method achieves trusses with smaller volumes and is over two orders of magnitude faster compared with recent state-of-the-art approaches.},
}
Endnote
%0 Report %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Chen, Renjie %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Computational Design of Lightweight Trusses : %U http://hdl.handle.net/21.11116/0000-0003-A7E9-A %U http://arxiv.org/abs/1901.05637 %D 2019 %X Trusses are load-carrying light-weight structures consisting of bars<br>connected at joints ubiquitously applied in a variety of engineering scenarios.<br>Designing optimal trusses that satisfy functional specifications with a minimal<br>amount of material has interested both theoreticians and practitioners for more<br>than a century. In this paper, we introduce two main ideas to improve upon the<br>state of the art. First, we formulate an alternating linear programming problem<br>for geometry optimization. Second, we introduce two sets of complementary<br>topological operations, including a novel subdivision scheme for global<br>topology refinement inspired by Michell's famed theoretical study. Based on<br>these two ideas, we build an efficient computational framework for the design<br>of lightweight trusses. \AD{We illustrate our framework with a variety of<br>functional specifications and extensions. We show that our method achieves<br>trusses with smaller volumes and is over two orders of magnitude faster<br>compared with recent state-of-the-art approaches.<br> %K Computer Science, Graphics, cs.GR
Hladký, J., Seidel, H.-P., and Steinberger, M. 2019a. The Camera Offset Space: Real-time Potentially Visible Set Computations for Streaming Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2019)38, 6.
Export
BibTeX
@article{Hladky_SA2019,
  TITLE     = {The Camera Offset Space: Real-time Potentially Visible Set Computations for Streaming Rendering},
  AUTHOR    = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-6008-1},
  DOI       = {10.1145/3355089.3356530},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2019},
  DATE      = {2019},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {38},
  NUMBER    = {6},
  EID       = {231},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2019},
}
Endnote
%0 Journal Article %A Hladk&#253;, Jozef %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T The Camera Offset Space: Real-time Potentially Visible Set Computations for Streaming Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0005-4E4F-D %R 10.1145/3355089.3356530 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 231 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2019 %O ACM SIGGRAPH Asia 2019 Brisbane, Australia, 17 - 20 November 2019 SA'19 SA 2019 %@ 978-1-4503-6008-1
Hladký, J., Seidel, H.-P., and Steinberger, M. 2019b. Tessellated Shading Streaming. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2019)38, 4.
Export
BibTeX
@article{Hladky_EGSR2019,
  TITLE     = {Tessellated Shading Streaming},
  AUTHOR    = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  URL       = {https://diglib.eg.org/handle/10.1111/cgf13780},
  DOI       = {10.1111/cgf.13780},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2019},
  DATE      = {2019},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME    = {38},
  NUMBER    = {4},
  PAGES     = {171--182},
  BOOKTITLE = {Eurographics Symposium on Rendering 2019},
  EDITOR    = {Boubekeur, Tamy and Sen, Pradeep},
}
Endnote
%0 Journal Article %A Hladk&#253;, Jozef %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Tessellated Shading Streaming : %G eng %U http://hdl.handle.net/21.11116/0000-0004-4897-1 %R 10.1111/cgf.13780 %U https://diglib.eg.org/handle/10.1111/cgf13780 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 4 %& 171 %P 171 - 182 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2019 %O Eurographics Symposium on Rendering 2019 EGSR 2019 Strasbourg, France, July 10 - 12, 2019
Elgharib, M., Mallikarjun B R, Tewari, A., et al. 2019. EgoFace: Egocentric Face Performance Capture and Videorealistic Reenactment. http://arxiv.org/abs/1905.10822.
(arXiv: 1905.10822)
Abstract
Face performance capture and reenactment techniques use multiple cameras and sensors, positioned at a distance from the face or mounted on heavy wearable devices. This limits their applications in mobile and outdoor environments. We present EgoFace, a radically new lightweight setup for face performance capture and front-view videorealistic reenactment using a single egocentric RGB camera. Our lightweight setup allows operations in uncontrolled environments, and lends itself to telepresence applications such as video-conferencing from dynamic environments. The input image is projected into a low dimensional latent space of the facial expression parameters. Through careful adversarial training of the parameter-space synthetic rendering, a videorealistic animation is produced. Our problem is challenging as the human visual system is sensitive to the smallest face irregularities that could occur in the final results. This sensitivity is even stronger for video results. Our solution is trained in a pre-processing stage, through a supervised manner without manual annotations. EgoFace captures a wide variety of facial expressions, including mouth movements and asymmetrical expressions. It works under varying illuminations, background, movements, handles people from different ethnicities and can operate in real time.
Export
BibTeX
@online{Elgharib_arXiv1905.10822,
  TITLE      = {{EgoFace}: Egocentric Face Performance Capture and Videorealistic Reenactment},
  AUTHOR     = {Elgharib, Mohamed and {Mallikarjun B R} and Tewari, Ayush and Kim, Hyeongwoo and Liu, Wentao and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1905.10822},
  EPRINT     = {1905.10822},
  EPRINTTYPE = {arXiv},
  YEAR       = {2019},
  ABSTRACT   = {Face performance capture and reenactment techniques use multiple cameras and sensors, positioned at a distance from the face or mounted on heavy wearable devices. This limits their applications in mobile and outdoor environments. We present EgoFace, a radically new lightweight setup for face performance capture and front-view videorealistic reenactment using a single egocentric RGB camera. Our lightweight setup allows operations in uncontrolled environments, and lends itself to telepresence applications such as video-conferencing from dynamic environments. The input image is projected into a low dimensional latent space of the facial expression parameters. Through careful adversarial training of the parameter-space synthetic rendering, a videorealistic animation is produced. Our problem is challenging as the human visual system is sensitive to the smallest face irregularities that could occur in the final results. This sensitivity is even stronger for video results. Our solution is trained in a pre-processing stage, through a supervised manner without manual annotations. EgoFace captures a wide variety of facial expressions, including mouth movements and asymmetrical expressions. It works under varying illuminations, background, movements, handles people from different ethnicities and can operate in real time.},
}
Endnote
%0 Report %A Elgharib, Mohamed %A Mallikarjun B R, %A Tewari, Ayush %A Kim, Hyeongwoo %A Liu, Wentao %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoFace: Egocentric Face Performance Capture and Videorealistic Reenactment : %G eng %U http://hdl.handle.net/21.11116/0000-0003-F1E6-9 %U http://arxiv.org/abs/1905.10822 %D 2019 %X Face performance capture and reenactment techniques use multiple cameras and<br>sensors, positioned at a distance from the face or mounted on heavy wearable<br>devices. This limits their applications in mobile and outdoor environments. We<br>present EgoFace, a radically new lightweight setup for face performance capture<br>and front-view videorealistic reenactment using a single egocentric RGB camera.<br>Our lightweight setup allows operations in uncontrolled environments, and lends<br>itself to telepresence applications such as video-conferencing from dynamic<br>environments. The input image is projected into a low dimensional latent space<br>of the facial expression parameters. Through careful adversarial training of<br>the parameter-space synthetic rendering, a videorealistic animation is<br>produced. Our problem is challenging as the human visual system is sensitive to<br>the smallest face irregularities that could occur in the final results. This<br>sensitivity is even stronger for video results. Our solution is trained in a<br>pre-processing stage, through a supervised manner without manual annotations.<br>EgoFace captures a wide variety of facial expressions, including mouth<br>movements and asymmetrical expressions. 
It works under varying illuminations,<br>background, movements, handles people from different ethnicities and can<br>operate in real time.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR %U http://gvv.mpi-inf.mpg.de/projects/EgoFace/
Dokter, M., Hladký, J., Parger, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2019. Hierarchical Rasterization of Curved Primitives for Vector Graphics Rendering on the GPU. Computer Graphics Forum (Proc. EUROGRAPHICS 2019)38, 2.
Export
BibTeX
@article{Dokter_EG2019,
  TITLE     = {Hierarchical Rasterization of Curved Primitives for Vector Graphics Rendering on the {GPU}},
  AUTHOR    = {Dokter, Mark and Hladk{\'y}, Jozef and Parger, Mathias and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.13622},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2019},
  DATE      = {2019},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {38},
  NUMBER    = {2},
  PAGES     = {93--103},
  BOOKTITLE = {EUROGRAPHICS 2019 STAR -- State of The Art Reports},
}
Endnote
%0 Journal Article %A Dokter, Mark %A Hladk&#253;, Jozef %A Parger, Mathias %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Rasterization of Curved Primitives for Vector Graphics Rendering on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-FC80-1 %R 10.1111/cgf.13622 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 2 %& 93 %P 93 - 103 %I Wiley-Blackwell %C Oxford %@ false %B EUROGRAPHICS 2019 STAR &#8211; State of The Art Reports %O EUROGRAPHICS 2019 The 40th Annual Conference of the European Association for Computer Graphics ; Genova, Italy, May 6-10, 2019 EG 2019
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2019a. Neural View-Interpolation for Sparse Light Field Video. http://arxiv.org/abs/1910.13921.
(arXiv: 1910.13921)
Abstract
We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution.
Export
BibTeX
@online{Bemana_arXiv1910.13921,
  TITLE      = {Neural View-Interpolation for Sparse Light Field Video},
  AUTHOR     = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1910.13921},
  EPRINT     = {1910.13921},
  EPRINTTYPE = {arXiv},
  YEAR       = {2019},
  ABSTRACT   = {We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution.},
}
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Neural View-Interpolation for Sparse Light Field Video : %G eng %U http://hdl.handle.net/21.11116/0000-0005-7B16-9 %U http://arxiv.org/abs/1910.13921 %D 2019 %X We suggest representing light field (LF) videos as "one-off" neural networks<br>(NN), i.e., a learned mapping from view-plus-time coordinates to<br>high-resolution color values, trained on sparse views. Initially, this sounds<br>like a bad idea for three main reasons: First, a NN LF will likely have less<br>quality than a same-sized pixel basis representation. Second, only few training<br>data, e.g., 9 exemplars per frame are available for sparse LF videos. Third,<br>there is no generalization across LFs, but across view and time instead.<br>Consequently, a network needs to be trained for each LF video. Surprisingly,<br>these problems can turn into substantial advantages: Other than the linear<br>pixel basis, a NN has to come up with a compact, non-linear i.e., more<br>intelligent, explanation of color, conditioned on the sparse view and time<br>coordinates. As observed for many NN however, this representation now is<br>interpolatable: if the image output for sparse view coordinates is plausible,<br>it is for all intermediate, continuous coordinates as well. Our specific<br>network architecture involves a differentiable occlusion-aware warping step,<br>which leads to a compact set of trainable parameters and consequently fast<br>learning and fast execution.<br> %K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG,eess.IV
Bemana, M., Keinert, J., Myszkowski, K., et al. 2019b. Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image. Computer Graphics Forum (Proc. Pacific Graphics 2019)38, 7.
Export
BibTeX
@article{Bemana_PG2019,
  TITLE     = {Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image},
  AUTHOR    = {Bemana, Mojtaba and Keinert, Joachim and Myszkowski, Karol and B{\"a}tz, Michel and Ziegler, Matthias and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE  = {eng},
  ISSN      = {1467-8659},
  DOI       = {10.1111/cgf.13862},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2019},
  DATE      = {2019},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {38},
  NUMBER    = {7},
  PAGES     = {579--589},
  BOOKTITLE = {27th Annual International Conference on Computer Graphics and Applications (Pacific Graphics 2019)},
}
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Keinert, Joachim %A Myszkowski, Karol %A B&#228;tz, Michel %A Ziegler, Matthias %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BC5-F %R 10.1111/cgf.13862 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 7 %& 579 %P 579 - 589 %I Wiley-Blackwell %C Oxford, UK %@ false %B 27th Annual International Conference on Computer Graphics and Applications %O Pacific Graphics 2019 PG 2019 Seoul, October 14-17, 2019
2018
Zayer, R., Mlakar, D., Steinberger, M., and Seidel, H.-P. 2018a. Layered Fields for Natural Tessellations on Surfaces. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2018)37, 6.
Export
BibTeX
@article{Zayer:2018:LFN,
  TITLE     = {Layered Fields for Natural Tessellations on Surfaces},
  AUTHOR    = {Zayer, Rhaleb and Mlakar, Daniel and Steinberger, Markus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-6008-1},
  DOI       = {10.1145/3272127.3275072},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2018},
  DATE      = {2018},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {37},
  NUMBER    = {6},
  EID       = {264},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2018},
}
Endnote
%0 Journal Article %A Zayer, Rhaleb %A Mlakar, Daniel %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Fields for Natural Tessellations on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E0-E %R 10.1145/3272127.3275072 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 6 %Z sequence number: 264 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2018 %O ACM SIGGRAPH Asia 2018 Tokyo, Japan, December 04 - 07, 2018 SA'18 SA 2018 %@ 978-1-4503-6008-1
Zayer, R., Mlakar, D., Steinberger, M., and Seidel, H.-P. 2018b. Layered Fields for Natural Tessellations on Surfaces. http://arxiv.org/abs/1804.09152.
(arXiv: 1804.09152)
Abstract
Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware.
Export
BibTeX
@online{Zayer_arXiv1804.09152,
  TITLE      = {Layered Fields for Natural Tessellations on Surfaces},
  AUTHOR     = {Zayer, Rhaleb and Mlakar, Daniel and Steinberger, Markus and Seidel, Hans-Peter},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1804.09152},
  EPRINT     = {1804.09152},
  EPRINTTYPE = {arXiv},
  YEAR       = {2018},
  ABSTRACT   = {Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware.},
}
Endnote
%0 Report %A Zayer, Rhaleb %A Mlakar, Daniel %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Fields for Natural Tessellations on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-152D-5 %U http://arxiv.org/abs/1804.09152 %D 2018 %X Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware. %K Computer Science, Graphics, cs.GR,Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2018a. MonoPerfCap: Human Performance Capture from Monocular Video. ACM Transactions on Graphics37, 2.
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.
Export
BibTeX
@article{Xu_ToG2018,
  TITLE     = {{MonoPerfCap}: Human Performance Capture from Monocular Video},
  AUTHOR    = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/3181973},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2018},
  DATE      = {2018},
  ABSTRACT  = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {37},
  NUMBER    = {2},
  EID       = {27},
}
Endnote
%0 Journal Article %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0001-E20E-1 %R 10.1145/3181973 %7 2017 %D 2018 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. 
We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR %J ACM Transactions on Graphics %V 37 %N 2 %Z sequence number: 27 %I ACM %C New York, NY %@ false
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2018b. Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera. http://arxiv.org/abs/1803.05959.
(arXiv: 1803.05959)
Abstract
We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines.
Export
BibTeX
@online{Xu_arXiv1803.05959,
  TITLE      = {{Mo2Cap2}: Real-time Mobile {3D} Motion Capture with a Cap-mounted Fisheye Camera},
  AUTHOR     = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Fua, Pascal and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1803.05959},
  EPRINT     = {1803.05959},
  EPRINTTYPE = {arXiv},
  YEAR       = {2018},
  ABSTRACT   = {We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines.},
}
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Fua, Pascal %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3C65-B %U http://arxiv.org/abs/1803.05959 %D 2018 %X We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Wolski, K., Giunchi, D., Ye, N., et al. 2018. Dataset and Metrics for Predicting Local Visible Differences. ACM Transactions on Graphics 37, 5.
Export
BibTeX
@article{wolski2018dataset,
  TITLE     = {Dataset and Metrics for Predicting Local Visible Differences},
  AUTHOR    = {Wolski, Krzysztof and Giunchi, Daniele and Ye, Nanyang and Didyk, Piotr and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Steed, Anthony and Mantiuk, Rafa{\l} K.},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/3196493},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2018},
  DATE      = {2018},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {37},
  NUMBER    = {5},
  EID       = {172},
}
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Giunchi, Daniele %A Ye, Nanyang %A Didyk, Piotr %A Myszkowski, Karol %A Mantiuk, Rados\l{}aw %A Seidel, Hans-Peter %A Steed, Anthony %A Mantiuk, Rafa&#322; K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Dataset and Metrics for Predicting Local Visible Differences : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F75-2 %R 10.1145/3196493 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 5 %Z sequence number: 172 %I ACM %C New York, NY %@ false
Winter, M., Mlakar, D., Zayer, R., Seidel, H.-P., and Steinberger, M. 2018. faimGraph: High Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the GPU. The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC 2018), IEEE.
Export
BibTeX
@inproceedings{Winter:2018:FHP,
  TITLE     = {{faimGraph}: {High} Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the {GPU}},
  AUTHOR    = {Winter, Martin and Mlakar, Daniel and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE  = {eng},
  ISBN      = {978-1-5386-8384-2},
  URL       = {http://conferences.computer.org/sc/2018/#!/home},
  PUBLISHER = {IEEE},
  YEAR      = {2018},
  BOOKTITLE = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC 2018)},
  PAGES     = {754--766},
  ADDRESS   = {Dallas, TX, USA},
}
Endnote
%0 Conference Proceedings %A Winter, Martin %A Mlakar, Daniel %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T faimGraph: High Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E6-8 %D 2018 %B The International Conference for High Performance Computing, Networking, Storage, and Analysis %Z date of event: 2018-11-11 - 2018-11-16 %C Dallas, TX, USA %B The International Conference for High Performance Computing, Networking, Storage, and Analysis %P 754 - 766 %I IEEE %@ 978-1-5386-8384-2
Tewari, A., Bernard, F., Garrido, P., et al. 2018. FML: Face Model Learning from Videos. http://arxiv.org/abs/1812.07603.
(arXiv: 1812.07603)
Abstract
Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction.
Export
BibTeX
@online{tewari2018fml,
  TITLE      = {{FML}: {Face Model Learning from Videos}},
  AUTHOR     = {Tewari, Ayush and Bernard, Florian and Garrido, Pablo and Bharaj, Gaurav and Elgharib, Mohamed and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1812.07603},
  EPRINT     = {1812.07603},
  EPRINTTYPE = {arXiv},
  YEAR       = {2018},
  ABSTRACT   = {Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction.},
}
Endnote
%0 Report %A Tewari, Ayush %A Bernard, Florian %A Garrido, Pablo %A Bharaj, Gaurav %A Elgharib, Mohamed %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T FML: Face Model Learning from Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0002-EF79-A %U http://arxiv.org/abs/1812.07603 %D 2018 %X Monocular image-based 3D reconstruction of faces is a long-standing problem<br>in computer vision. Since image data is a 2D projection of a 3D face, the<br>resulting depth ambiguity makes the problem ill-posed. Most existing methods<br>rely on data-driven priors that are built from limited 3D face scans. In<br>contrast, we propose multi-frame video-based self-supervised training of a deep<br>network that (i) learns a face identity model both in shape and appearance<br>while (ii) jointly learning to reconstruct 3D faces. Our face model is learned<br>using only corpora of in-the-wild video clips collected from the Internet. This<br>virtually endless source of training data enables learning of a highly general<br>3D face model. In order to achieve this, we propose a novel multi-frame<br>consistency loss that ensures consistent shape and appearance across multiple<br>frames of a subject's face, thus minimizing depth ambiguity. At test time we<br>can use an arbitrary number of frames, so that we can perform both monocular as<br>well as multi-frame reconstruction.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV %U https://www.youtube.com/watch?v=SG2BwxCw0lQ
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2018. An Intuitive Control Space for Material Appearance. http://arxiv.org/abs/1806.04950.
(arXiv: 1806.04950)
Abstract
Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.
Export
BibTeX
@online{Serrano_arXiv1806.04950,
  TITLE      = {An Intuitive Control Space for Material Appearance},
  AUTHOR     = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1806.04950},
  EPRINT     = {1806.04950},
  EPRINTTYPE = {arXiv},
  YEAR       = {2018},
  ABSTRACT   = {Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.},
}
Endnote
%0 Report %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/21.11116/0000-0002-151E-6 %U http://arxiv.org/abs/1806.04950 %D 2018 %X Many different techniques for measuring material appearance have been<br>proposed in the last few years. These have produced large public datasets,<br>which have been used for accurate, data-driven appearance modeling. However,<br>although these datasets have allowed us to reach an unprecedented level of<br>realism in visual appearance, editing the captured data remains a challenge. In<br>this paper, we present an intuitive control space for predictable editing of<br>captured BRDF data, which allows for artistic creation of plausible novel<br>material appearances, bypassing the difficulty of acquiring novel samples. We<br>first synthesize novel materials, extending the existing MERL dataset up to 400<br>mathematically valid BRDFs. We then design a large-scale experiment, gathering<br>56,000 subjective ratings on the high-level perceptual attributes that best<br>describe our extended dataset of materials. Using these ratings, we build and<br>train networks of radial basis functions to act as functionals mapping the<br>perceptual attributes to an underlying PCA-based representation of BRDFs. We<br>show that our functionals are excellent predictors of the perceived attributes<br>of appearance. Our control space enables many applications, including intuitive<br>material editing of a wide range of visual properties, guidance for gamut<br>mapping, analysis of the correlation between perceptual attributes, or novel<br>appearance similarity metrics. 
Moreover, our methodology can be used to derive<br>functionals applicable to classic analytic BRDF representations. We release our<br>code and dataset publicly, in order to support and encourage further research<br>in this direction.<br> %K Computer Science, Graphics, cs.GR
Myszkowski, K., Tursun, O.T., Kellnhofer, P., et al. 2018. Perceptual Display: Apparent Enhancement of Scene Detail and Depth. Electronic Imaging (Proc. HVEI 2018), SPIE/IS&T.
(Keynote Talk)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
Export
BibTeX
@inproceedings{Myszkowski2018Perceptual,
  TITLE     = {Perceptual Display: Apparent Enhancement of Scene Detail and Depth},
  AUTHOR    = {Myszkowski, Karol and Tursun, Okan Tarhan and Kellnhofer, Petr and Templin, Krzysztof and Arabadzhiyska, Elena and Didyk, Piotr and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {2470-1173},
  DOI       = {10.2352/ISSN.2470-1173.2018.14.HVEI-501},
  PUBLISHER = {SPIE/IS\&T},
  YEAR      = {2018},
  ABSTRACT  = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.},
  BOOKTITLE = {Human Vision and Electronic Imaging (HVEI 2018)},
  PAGES     = {1--10},
  EID       = {501},
  JOURNAL   = {Electronic Imaging (Proc. HVEI)},
  VOLUME    = {2018},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tursun, Okan Tarhan %A Kellnhofer, Petr %A Templin, Krzysztof %A Arabadzhiyska, Elena %A Didyk, Piotr %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Display: Apparent Enhancement of Scene Detail and Depth : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F64-5 %R 10.2352/ISSN.2470-1173.2018.14.HVEI-501 %D 2018 %B Human Vision and Electronic Imaging %Z date of event: 2018-01-28 - 2018-02-02 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. 
This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. %B Human Vision and Electronic Imaging %P 1 - 10 %Z sequence number: 501 %I SPIE/IS&T %J Electronic Imaging %V 2018 %@ false
Mlakar, D., Winter, M., Seidel, H.-P., Steinberger, M., and Zayer, R. 2018. AlSub: Fully Parallel and Modular Subdivision. http://arxiv.org/abs/1809.06047.
(arXiv: 1809.06047)
Abstract
In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing.
Export
BibTeX
@online{Mlakar_arXiv1809.06047, TITLE = {{AlSub}: Fully Parallel and Modular Subdivision}, AUTHOR = {Mlakar, Daniel and Winter, Martin and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1809.06047}, EPRINT = {1809.06047}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing.}, }
Endnote
%0 Report %A Mlakar, Daniel %A Winter, Martin %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T AlSub: Fully Parallel and Modular Subdivision : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E2-C %U http://arxiv.org/abs/1809.06047 %D 2018 %X In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing. %K Computer Science, Graphics, cs.GR
Meka, A., Maximov, M., Zollhöfer, M., et al. 2018a. LIME: Live Intrinsic Material Estimation. http://arxiv.org/abs/1801.01075.
(arXiv: 1801.01075)
Abstract
We present the first end to end approach for real time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill posed inverse rendering problem using recent advances in image to image translation techniques based on deep convolutional encoder decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real world image formation and obtains intermediate results even during run time. The estimation of material parameters at real time frame rates enables exciting mixed reality applications, such as seamless illumination consistent integration of virtual objects into real world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation.
Export
BibTeX
@online{Meka_arXiv1801.01075, TITLE = {{LIME}: Live Intrinsic Material Estimation}, AUTHOR = {Meka, Abhimitra and Maximov, Maxim and Zollh{\"o}fer, Michael and Chatterjee, Avishek and Seidel, Hans-Peter and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1801.01075}, EPRINT = {1801.01075}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {We present the first end to end approach for real time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill posed inverse rendering problem using recent advances in image to image translation techniques based on deep convolutional encoder decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real world image formation and obtains intermediate results even during run time. The estimation of material parameters at real time frame rates enables exciting mixed reality applications, such as seamless illumination consistent integration of virtual objects into real world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation.}, }
Endnote
%0 Report %A Meka, Abhimitra %A Maximov, Maxim %A Zollh&#246;fer, Michael %A Chatterjee, Avishek %A Seidel, Hans-Peter %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T LIME: Live Intrinsic Material Estimation : %G eng %U http://hdl.handle.net/21.11116/0000-0001-40D9-2 %U http://arxiv.org/abs/1801.01075 %D 2018 %X We present the first end to end approach for real time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill posed inverse rendering problem using recent advances in image to image translation techniques based on deep convolutional encoder decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real world image formation and obtains intermediate results even during run time. The estimation of material parameters at real time frame rates enables exciting mixed reality applications, such as seamless illumination consistent integration of virtual objects into real world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Meka, A., Maximov, M., Zollhöfer, M., et al. 2018b. LIME: Live Intrinsic Material Estimation. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018), IEEE.
Export
BibTeX
@inproceedings{Meka:2018, TITLE = {{LIME}: {L}ive Intrinsic Material Estimation}, AUTHOR = {Meka, Abhimitra and Maximov, Maxim and Zollh{\"o}fer, Michael and Chatterjee, Avishek and Seidel, Hans-Peter and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5386-6420-9}, DOI = {10.1109/CVPR.2018.00661}, PUBLISHER = {IEEE}, YEAR = {2018}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018)}, PAGES = {6315--6324}, ADDRESS = {Salt Lake City, UT, USA}, }
Endnote
%0 Conference Proceedings %A Meka, Abhimitra %A Maximov, Maxim %A Zollh&#246;fer, Michael %A Chatterjee, Avishek %A Seidel, Hans-Peter %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T LIME: Live Intrinsic Material Estimation : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F391-7 %R 10.1109/CVPR.2018.00661 %D 2018 %B 31st IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2018-06-18 - 2018-06-22 %C Salt Lake City, UT, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 6315 - 6324 %I IEEE %@ 978-1-5386-6420-9 %U http://gvv.mpi-inf.mpg.de/projects/LIME/
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2018a. End-to-end Sampling Patterns. http://arxiv.org/abs/1806.06710.
(arXiv: 1806.06710)
Abstract
Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.
Export
BibTeX
@online{Leimkuehler_arXiv1806.06710, TITLE = {End-to-end Sampling Patterns}, AUTHOR = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.06710}, EPRINT = {1806.06710}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.}, }
Endnote
%0 Report %A Leimk&#252;hler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T End-to-end Sampling Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0002-1376-4 %U http://arxiv.org/abs/1806.06710 %D 2018 %X Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. 
Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties. %K Computer Science, Graphics, cs.GR
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2018b. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. IEEE Transactions on Visualization and Computer Graphics24, 6.
Export
BibTeX
@article{Leimkuehler2018, TITLE = {Perceptual Real-Time {2D}-to-{3D} Conversion Using Cue Fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2017.2703612}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {24}, NUMBER = {6}, PAGES = {2037--2050}, }
Endnote
%0 Journal Article %A Leimk&#252;hler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/21.11116/0000-0001-409A-9 %R 10.1109/TVCG.2017.2703612 %7 2018 %D 2018 %J IEEE Transactions on Visualization and Computer Graphics %V 24 %N 6 %& 2037 %P 2037 - 2050 %I IEEE Computer Society %C New York, NY %@ false
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2018c. Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2018)37, 4.
Export
BibTeX
@article{LeimkuehlerSIGGRAPH2018, TITLE = {Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction}, AUTHOR = {Leimk{\"u}hler, Thomas and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3197517.3201379}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {37}, NUMBER = {4}, PAGES = {1--11}, EID = {55}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2018}, }
Endnote
%0 Journal Article %A Leimk&#252;hler, Thomas %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-0002-0630-1 %R 10.1145/3197517.3201379 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 4 %& 1 %P 1 - 11 %Z sequence number: 55 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2018 %O ACM SIGGRAPH 2018 Vancouver, Canada , 12 - 16 August
Golla, B., Seidel, H.-P., and Chen, R. 2018. Piecewise Linear Mapping Optimization Based on the Complex View. Computer Graphics Forum (Proc. Pacific Graphics 2018)37, 7.
Export
BibTeX
@article{Golla_PG2018, TITLE = {Piecewise Linear Mapping Optimization Based on the Complex View}, AUTHOR = {Golla, Bj{\"o}rn and Seidel, Hans-Peter and Chen, Renjie}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13563}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2018}, DATE = {2018}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {37}, NUMBER = {7}, PAGES = {233--243}, BOOKTITLE = {The 26th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2018)}, }
Endnote
%0 Journal Article %A Golla, Bj&#246;rn %A Seidel, Hans-Peter %A Chen, Renjie %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Piecewise Linear Mapping Optimization Based on the Complex View : %G eng %U http://hdl.handle.net/21.11116/0000-0002-72CD-7 %R 10.1111/cgf.13563 %7 2018 %D 2018 %J Computer Graphics Forum %V 37 %N 7 %& 233 %P 233 - 243 %I Wiley-Blackwell %C Oxford, UK %@ false %B The 26th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2018 PG 2018 Hong Kong, 8-11 October 2018
Beigpour, S., Shekhar, S., Mansouryar, M., Myszkowski, K., and Seidel, H.-P. 2018. Light-Field Appearance Editing Based on Intrinsic Decomposition. Journal of Perceptual Imaging1, 1.
Export
BibTeX
@article{Beigpour2018, TITLE = {Light-Field Appearance Editing Based on Intrinsic Decomposition}, AUTHOR = {Beigpour, Shida and Shekhar, Sumit and Mansouryar, Mohsen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.2352/J.Percept.Imaging.2018.1.1.010502}, YEAR = {2018}, JOURNAL = {Journal of Perceptual Imaging}, VOLUME = {1}, NUMBER = {1}, PAGES = {1--15}, EID = {010502}, }
Endnote
%0 Journal Article %A Beigpour, Shida %A Shekhar, Sumit %A Mansouryar, Mohsen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Light-Field Appearance Editing Based on Intrinsic Decomposition : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F88-C %R 10.2352/J.Percept.Imaging.2018.1.1.010502 %7 2018 %D 2018 %J Journal of Perceptual Imaging %O JPI %V 1 %N 1 %& 1 %P 1 - 15 %Z sequence number: 10502
2017
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017a. Sparse Matrix Assembly on the GPU Through Multiplication Patterns. IEEE High Performance Extreme Computing Conference (HPEC 2017), IEEE.
Export
BibTeX
@inproceedings{Zayer_HPEC2017, TITLE = {Sparse Matrix Assembly on the {GPU} Through Multiplication Patterns}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-5386-3472-1}, DOI = {10.1109/HPEC.2017.8091057}, PUBLISHER = {IEEE}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2017)}, PAGES = {1--8}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Sparse Matrix Assembly on the GPU Through Multiplication Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B33-5 %R 10.1109/HPEC.2017.8091057 %D 2017 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2017-09-12 - 2017-09-14 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5386-3472-1
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017b. A GPU-adapted Structure for Unstructured Grids. Computer Graphics Forum (Proc. EUROGRAPHICS 2017)36, 2.
Export
BibTeX
@article{Zayer2017, TITLE = {A {GPU}-adapted Structure for Unstructured Grids}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13144}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {36}, NUMBER = {2}, PAGES = {495--507}, BOOKTITLE = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)}, }
Endnote
%0 Journal Article %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A GPU-adapted Structure for Unstructured Grids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5A05-7 %R 10.1111/cgf.13144 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 495 %P 495 - 507 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2017. MonoPerfCap: Human Performance Capture from Monocular Video. http://arxiv.org/abs/1708.02136.
(arXiv: 1708.02136)
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.
Export
BibTeX
@online{Xu2017, TITLE = {{MonoPerfCap}: Human Performance Capture from Monocular Video}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1708.02136}, EPRINT = {1708.02136}, EPRINTTYPE = {arXiv}, YEAR = {2017}, ABSTRACT = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.}, }
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-05C2-9 %U http://arxiv.org/abs/1708.02136 %D 2017 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. 
We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2017. Stream Line–Based Pattern Search in Flows. Computer Graphics Forum36, 8.
Export
BibTeX
@article{Wang:Esturo:Seidel:Weinkauf2016, TITLE = {Stream Line--Based Pattern Search in Flows}, AUTHOR = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12990}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum}, VOLUME = {36}, NUMBER = {8}, PAGES = {7--18}, }
Endnote
%0 Journal Article %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stream Line&#8211;Based Pattern Search in Flows : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-4301-A %R 10.1111/cgf.12990 %7 2016 %D 2017 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 36 %N 8 %& 7 %P 7 - 18 %I Wiley-Blackwell %C Oxford %@ false
Steinberger, M., Zayer, R., and Seidel, H.-P. 2017. Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{SteinbergerICS2017,
  title     = {Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the {GPU}},
  author    = {Steinberger, Markus and Zayer, Rhaleb and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4503-5020-4},
  doi       = {10.1145/3079079.3079086},
  publisher = {ACM},
  year      = {2017},
  date      = {2017},
  booktitle = {ICS 2017, International Conference on Supercomputing},
  pages     = {1--11},
  eid       = {13},
  address   = {Chicago, IL, USA},
}
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D71-2 %R 10.1145/3079079.3079086 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-14 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %P 1 - 11 %Z sequence number: 13 %I ACM %@ 978-1-4503-5020-4
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2017. Fast Similarity Search in Scalar Fields using Merging Histograms. In: Topological Methods in Data Analysis and Visualization IV. Springer, Cham.
Export
BibTeX
@incollection{Saikia_Seidel_Weinkauf2017,
  title     = {Fast Similarity Search in Scalar Fields using Merging Histograms},
  author    = {Saikia, Himangshu and Seidel, Hans-Peter and Weinkauf, Tino},
  language  = {eng},
  isbn      = {978-3-319-44682-0},
  doi       = {10.1007/978-3-319-44684-4_7},
  publisher = {Springer},
  address   = {Cham},
  year      = {2017},
  date      = {2017},
  booktitle = {Topological Methods in Data Analysis and Visualization IV},
  editor    = {Carr, Hamish and Garth, Christoph and Weinkauf, Tino},
  pages     = {121--134},
  series    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Saikia, Himangshu %A Seidel, Hans-Peter %A Weinkauf, Tino %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Fast Similarity Search in Scalar Fields using Merging Histograms : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-772A-0 %R 10.1007/978-3-319-44684-4_7 %D 2017 %B Topological Methods in Data Analysis and Visualization IV %E Carr, Hamish; Garth, Christoph; Weinkauf, Tino %P 121 - 134 %I Springer %C Cham %@ 978-3-319-44682-0 %S Mathematics and Visualization
Nalbach, O., Seidel, H.-P., and Ritschel, T. 2017a. Practical Capture and Reproduction of Phosphorescent Appearance. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Export
BibTeX
@article{Nalbach2017,
  title     = {Practical Capture and Reproduction of Phosphorescent Appearance},
  author    = {Nalbach, Oliver and Seidel, Hans-Peter and Ritschel, Tobias},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.13136},
  publisher = {Wiley-Blackwell},
  address   = {Oxford},
  year      = {2017},
  date      = {2017},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {36},
  number    = {2},
  pages     = {409--420},
  booktitle = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)},
}
Endnote
%0 Journal Article %A Nalbach, Oliver %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Practical Capture and Reproduction of Phosphorescent Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A53-9 %R 10.1111/cgf.13136 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 409 %P 409 - 420 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2017b. Deep Shading: Convolutional Neural Networks for Screen Space Shading. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Export
BibTeX
@article{NalbachEGSR2017,
  title     = {Deep Shading: {C}onvolutional Neural Networks for Screen Space Shading},
  author    = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.13225},
  publisher = {Wiley-Blackwell},
  address   = {Oxford},
  year      = {2017},
  date      = {2017},
  journal   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  volume    = {36},
  number    = {4},
  pages     = {65--78},
  booktitle = {Eurographics Symposium on Rendering 2017},
  editor    = {Zwicker, Matthias and Sander, Pedro},
}
Endnote
%0 Journal Article %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CD86-6 %R 10.1111/cgf.13225 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 4 %& 65 %P 65 - 78 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2017 %O Eurographics Symposium on Rendering 2017 EGSR 2017 Helsinki, Finland, 19-21 June 2017
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017a. VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera. http://arxiv.org/abs/1705.01583.
(arXiv: 1705.01583)
Abstract
We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.
Export
BibTeX
@online{MehtaArXiv2017,
  title      = {{VNect}: Real-time {3D} Human Pose Estimation with a Single {RGB} Camera},
  author     = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian},
  url        = {http://arxiv.org/abs/1705.01583},
  doi        = {10.1145/3072959.3073596},
  eprint     = {1705.01583},
  eprinttype = {arXiv},
  year       = {2017},
  abstract   = {We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.},
}
Endnote
%0 Report %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera : %U http://hdl.handle.net/11858/00-001M-0000-002D-7D78-3 %R 10.1145/3072959.3073596 %U http://arxiv.org/abs/1705.01583 %D 2017 %X We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. 
However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017b. VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{MehtaSIGGRAPH2017,
  title     = {{VNect}: {R}eal-Time {3D} Human Pose Estimation With a Single {RGB} Camera},
  author    = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3072959.3073596},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2017},
  date      = {2017},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {36},
  number    = {4},
  pages     = {1--14},
  eid       = {44},
  booktitle = {Proceedings of ACM SIGGRAPH 2017},
}
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D95-0 %R 10.1145/3072959.3073596 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 14 %Z sequence number: 44 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2017. Minimal Warping: Planning Incremental Novel-view Synthesis. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Export
BibTeX
@article{LeimkuehlerEGSR2017,
  title     = {Minimal Warping: {P}lanning Incremental Novel-view Synthesis},
  author    = {Leimk{\"u}hler, Thomas and Seidel, Hans-Peter and Ritschel, Tobias},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.13219},
  publisher = {Wiley-Blackwell},
  address   = {Oxford},
  year      = {2017},
  date      = {2017},
  journal   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  volume    = {36},
  number    = {4},
  pages     = {1--14},
  booktitle = {Eurographics Symposium on Rendering 2017},
  editor    = {Zwicker, Matthias and Sander, Pedro},
}
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Minimal Warping: Planning Incremental Novel-view Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CD7C-D %R 10.1111/cgf.13219 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 4 %& 1 %P 1 - 14 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2017 %O Eurographics Symposium on Rendering 2017 EGSR 2017 Helsinki, Finland, 19-21 June 2017
Kol, T.R., Klehm, O., Seidel, H.-P., and Eisemann, E. 2017. Expressive Single Scattering for Light Shaft Stylization. IEEE Transactions on Visualization and Computer Graphics 23, 7.
Export
BibTeX
@article{kol2016expressive,
  title     = {Expressive Single Scattering for Light Shaft Stylization},
  author    = {Kol, Timothy R. and Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2016.2554114},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2017},
  date      = {2017},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {23},
  number    = {7},
  pages     = {1753--1766},
}
Endnote
%0 Journal Article %A Kol, Timothy R. %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Expressive Single Scattering for Light Shaft Stylization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64E7-2 %R 10.1109/TVCG.2016.2554114 %7 2016-04-14 %D 2017 %J IEEE Transactions on Visualization and Computer Graphics %V 23 %N 7 %& 1753 %P 1753 - 1766 %I IEEE Computer Society %C New York, NY %@ false
Kerbl, B., Kenzel, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2017. Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU. Computer Graphics Forum 36, 8.
Export
BibTeX
@article{Seidel_Steinberger2016,
  title     = {Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the {GPU}},
  author    = {Kerbl, Bernhard and Kenzel, Michael and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.13075},
  publisher = {Blackwell-Wiley},
  address   = {Oxford},
  year      = {2017},
  date      = {2017},
  journal   = {Computer Graphics Forum},
  volume    = {36},
  number    = {8},
  pages     = {232--246},
}
Endnote
%0 Journal Article %A Kerbl, Bernhard %A Kenzel, Michael %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-1823-8 %R 10.1111/cgf.13075 %7 2016-12-05 %D 2017 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 36 %N 8 %& 232 %P 232 - 246 %I Blackwell-Wiley %C Oxford %@ false
Jiang, C., Tang, C., Seidel, H.-P., and Wonka, P. 2017. Design and Volume Optimization of Space Structures. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{JiangSIGGRAPH2017,
  title     = {Design and Volume Optimization of Space Structures},
  author    = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Wonka, Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3072959.3073619},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2017},
  date      = {2017},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {36},
  number    = {4},
  pages     = {1--14},
  eid       = {159},
  booktitle = {Proceedings of ACM SIGGRAPH 2017},
}
Endnote
%0 Journal Article %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Design and Volume Optimization of Space Structures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D8E-2 %R 10.1145/3072959.3073619 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 14 %Z sequence number: 159 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Haubenwallner, K., Seidel, H.-P., and Steinberger, M. 2017. ShapeGenetics: Using Genetic Algorithms for Procedural Modeling. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Export
BibTeX
@article{haubenwallner2017shapegenetics,
  title     = {{ShapeGenetics}: {U}sing Genetic Algorithms for Procedural Modeling},
  author    = {Haubenwallner, Karl and Seidel, Hans-Peter and Steinberger, Markus},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.13120},
  publisher = {Wiley-Blackwell},
  address   = {Oxford},
  year      = {2017},
  date      = {2017},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {36},
  number    = {2},
  pages     = {213--223},
  booktitle = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)},
}
Endnote
%0 Journal Article %A Haubenwallner, Karl %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T ShapeGenetics: Using Genetic Algorithms for Procedural Modeling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5C69-8 %R 10.1111/cgf.13120 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 213 %P 213 - 223 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Derler, A., Zayer, R., Seidel, H.-P., and Steinberger, M. 2017. Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{DerlerICS2017,
  title     = {Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the {GPU}},
  author    = {Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus},
  language  = {eng},
  isbn      = {978-1-4503-5020-4},
  doi       = {10.1145/3079079.3079085},
  publisher = {ACM},
  year      = {2017},
  date      = {2017},
  booktitle = {ICS 2017, International Conference on Supercomputing},
  eid       = {7},
  address   = {Chicago, IL, USA},
}
Endnote
%0 Conference Proceedings %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D73-D %R 10.1145/3079079.3079085 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-13 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %Z sequence number: 7 %I ACM %@ 978-1-4503-5020-4
Arabadzhiyska, E., Tursun, O.T., Myszkowski, K., Seidel, H.-P., and Didyk, P. 2017. Saccade Landing Position Prediction for Gaze-Contingent Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{ArabadzhiyskaSIGGRAPH2017,
  title     = {Saccade Landing Position Prediction for Gaze-Contingent Rendering},
  author    = {Arabadzhiyska, Elena and Tursun, Okan Tarhan and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/3072959.3073642},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2017},
  date      = {2017},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {36},
  number    = {4},
  pages     = {1--12},
  eid       = {50},
  booktitle = {Proceedings of ACM SIGGRAPH 2017},
}
Endnote
%0 Journal Article %A Arabadzhiyska, Elena %A Tursun, Okan Tarhan %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Saccade Landing Position Prediction for Gaze-Contingent Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D82-9 %R 10.1145/3072959.3073642 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 12 %Z sequence number: 50 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017a. Towards a Quality Metric for Dense Light Fields. http://arxiv.org/abs/1704.07576.
(arXiv: 1704.07576)
Abstract
Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.
Export
BibTeX
@online{AdhikarlaArXiv17,
  title      = {Towards a Quality Metric for Dense Light Fields},
  author     = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} K. and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr},
  url        = {http://arxiv.org/abs/1704.07576},
  eprint     = {1704.07576},
  eprinttype = {arXiv},
  year       = {2017},
  abstract   = {Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.},
}
Endnote
%0 Report %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafa&#322; K. %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %U http://hdl.handle.net/11858/00-001M-0000-002D-2C2C-1 %U http://arxiv.org/abs/1704.07576 %D 2017 %X Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017b. Towards a Quality Metric for Dense Light Fields. 30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017), IEEE Computer Society.
Export
BibTeX
@inproceedings{Vamsi2017,
  title     = {Towards a Quality Metric for Dense Light Fields},
  author    = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr},
  language  = {eng},
  isbn      = {978-1-5386-0458-8},
  doi       = {10.1109/CVPR.2017.396},
  publisher = {IEEE Computer Society},
  year      = {2017},
  date      = {2017},
  booktitle = {30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017)},
  pages     = {3720--3729},
  address   = {Honolulu, HI, USA},
}
Endnote
%0 Conference Proceedings %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-E476-3 %R 10.1109/CVPR.2017.396 %D 2017 %B 30th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2017-07-21 - 2017-07-26 %C Honolulu, HI, USA %B 30th IEEE Conference on Computer Vision and Pattern Recognition %P 3720 - 3729 %I IEEE Computer Society %@ 978-1-5386-0458-8
2016
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2016. Multi-field Pattern Matching Based on Sparse Feature Sampling. IEEE Transactions on Visualization and Computer Graphics 22, 1.
Export
BibTeX
@article{Wang2015,
  title     = {Multi-field Pattern Matching Based on Sparse Feature Sampling},
  author    = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2015.2467292},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2016},
  date      = {2016},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {22},
  number    = {1},
  pages     = {807--816},
}
Endnote
%0 Journal Article %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Multi-field Pattern Matching Based on Sparse Feature Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-1A76-6 %R 10.1109/TVCG.2015.2467292 %7 2015 %D 2016 %J IEEE Transactions on Visualization and Computer Graphics %V 22 %N 1 %& 807 %P 807 - 816 %I IEEE Computer Society %C New York, NY %@ false
Von Radziewsky, P., Eisemann, E., Seidel, H.-P., and Hildebrandt, K. 2016. Optimized Subspaces for Deformation-based Modeling and Shape Interpolation. Computers and Graphics (Proc. SMI 2016) 58.
Export
BibTeX
@article{Radziewsky2016,
  title     = {Optimized Subspaces for Deformation-based Modeling and Shape Interpolation},
  author    = {von Radziewsky, Philipp and Eisemann, Elmar and Seidel, Hans-Peter and Hildebrandt, Klaus},
  language  = {eng},
  issn      = {0097-8493},
  doi       = {10.1016/j.cag.2016.05.016},
  publisher = {Elsevier},
  address   = {Amsterdam},
  year      = {2016},
  date      = {2016},
  journal   = {Computers and Graphics (Proc. SMI)},
  volume    = {58},
  pages     = {128--138},
  booktitle = {Shape Modeling International 2016 (SMI 2016)},
}
Endnote
%0 Journal Article %A von Radziewsky, Philipp %A Eisemann, Elmar %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Optimized Subspaces for Deformation-based Modeling and Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0144-0 %R 10.1016/j.cag.2016.05.016 %7 2016 %D 2016 %J Computers and Graphics %V 58 %& 128 %P 128 - 138 %I Elsevier %C Amsterdam %@ false %B Shape Modeling International 2016 %O SMI 2016
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Emulating Displays with Continuously Varying Frame Rates. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
Export
BibTeX
@article{TemplinSIGGRAPH2016,
  TITLE     = {Emulating Displays with Continuously Varying Frame Rates},
  AUTHOR    = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2897824.2925879},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {35},
  NUMBER    = {4},
  PAGES     = {1--11},
  EID       = {67},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016},
}
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Emulating Displays with Continuously Varying Frame Rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-018D-E %R 10.1145/2897824.2925879 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %& 1 %P 1 - 11 %Z sequence number: 67 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Steinberger, M., Derler, A., Zayer, R., and Seidel, H.-P. 2016. How Naive is Naive SpMV on the GPU? IEEE High Performance Extreme Computing Conference (HPEC 2016), IEEE.
Export
BibTeX
@inproceedings{SteinbergerHPEC2016,
  TITLE     = {How naive is naive {SpMV} on the {GPU}?},
  AUTHOR    = {Steinberger, Markus and Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-5090-3525-0},
  DOI       = {10.1109/HPEC.2016.7761634},
  PUBLISHER = {IEEE},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2016)},
  PAGES     = {1--8},
  ADDRESS   = {Waltham, MA, USA},
}
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T How Naive is Naive SpMV on the GPU? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98A5-F %R 10.1109/HPEC.2016.7761634 %D 2016 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2016-09-13 - 2016-09-15 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5090-3525-0
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016a. Intuitive Editing of Material Appearance. ACM SIGGRAPH 2016 Posters.
Export
BibTeX
@inproceedings{SerranoSIGGRAPH2016,
  TITLE     = {Intuitive Editing of Material Appearance},
  AUTHOR    = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-4371-8},
  DOI       = {10.1145/2945078.2945141},
  PUBLISHER = {ACM},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {ACM SIGGRAPH 2016 Posters},
  PAGES     = {1--2},
  EID       = {63},
  ADDRESS   = {Anaheim, CA, USA},
}
Endnote
%0 Generic %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Intuitive Editing of Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0170-C %R 10.1145/2945078.2945141 %D 2016 %Z name of event: 43rd International Conference and Exhibition on Computer Graphics & Interactive Techniques %Z date of event: 2016-07-24 - 2016-07-28 %Z place of event: Anaheim, CA, USA %B ACM SIGGRAPH 2016 Posters %P 1 - 2 %Z sequence number: 63 %@ 978-1-4503-4371-8
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016b. An Intuitive Control Space for Material Appearance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Serrano_MaterialAppearance_2016,
  TITLE     = {An Intuitive Control Space for Material Appearance},
  AUTHOR    = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980242},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  EID       = {186},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B8-9 %R 10.1145/2980179.2980242 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 186 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Robertini, N., Casas, D., Rhodin, H., Seidel, H.-P., and Theobalt, C. 2016. Model-Based Outdoor Performance Capture. Fourth International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Robertini:2016,
  TITLE     = {Model-Based Outdoor Performance Capture},
  AUTHOR    = {Robertini, Nadia and Casas, Dan and Rhodin, Helge and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-1-5090-5407-7},
  URL       = {http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/},
  DOI       = {10.1109/3DV.2016.25},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {Fourth International Conference on 3D Vision},
  PAGES     = {166--175},
  ADDRESS   = {Stanford, CA, USA},
}
Endnote
%0 Conference Proceedings %A Robertini, Nadia %A Casas, Dan %A Rhodin, Helge %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Model-Based Outdoor Performance Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A6D-2 %R 10.1109/3DV.2016.25 %U http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/ %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 166 - 175 %I IEEE Computer Society %@ 978-1-5090-5407-7
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016a. A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation. http://arxiv.org/abs/1602.03725.
(arXiv: 1602.03725)
Abstract
Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.
Export
BibTeX
@online{Rhodin2016arXiv1602.03725,
  TITLE       = {A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation},
  AUTHOR      = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE    = {eng},
  URL         = {http://arxiv.org/abs/1602.03725},
  EPRINT      = {1602.03725},
  EPRINTTYPE  = {arXiv},
  EPRINTCLASS = {cs.CV},
  YEAR        = {2016},
  ABSTRACT    = {Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.},
}
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9875-C %U http://arxiv.org/abs/1602.03725 %D 2016 %X Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016b. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. http://arxiv.org/abs/1607.08659.
(arXiv: 1607.08659)
Abstract
Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.
Export
BibTeX
@online{Rhodin2016arXiv1607.08659,
  TITLE       = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues},
  AUTHOR      = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE    = {eng},
  URL         = {http://arxiv.org/abs/1607.08659},
  EPRINT      = {1607.08659},
  EPRINTTYPE  = {arXiv},
  EPRINTCLASS = {cs.CV},
  YEAR        = {2016},
  ABSTRACT    = {Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation -- skeleton, volumetric shape, appearance, and optionally a body surface -- and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.},
}
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9883-C %U http://arxiv.org/abs/1607.08659 %D 2016 %X Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. 
Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Rhodin, H., Richardt, C., Casas, D., et al. 2016c. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract). http://arxiv.org/abs/1701.00142.
(arXiv: 1701.00142)
Abstract
Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes.
Export
BibTeX
@online{DBLP:journals/corr/RhodinRCISSST17,
  TITLE       = {{EgoCap}: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract)},
  AUTHOR      = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian},
  LANGUAGE    = {eng},
  URL         = {http://arxiv.org/abs/1701.00142},
  EPRINT      = {1701.00142},
  EPRINTTYPE  = {arXiv},
  EPRINTCLASS = {cs.CV},
  YEAR        = {2016},
  ABSTRACT    = {Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes.},
}
Endnote
%0 Report %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract) : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B3D-B %U http://arxiv.org/abs/1701.00142 %D 2016 %X Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Rhodin, H., Richardt, C., Casas, D., et al. 2016d. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Rhodin2016SGA,
  TITLE     = {{EgoCap}: {Egocentric} Marker-less Motion Capture with Two Fisheye Cameras},
  AUTHOR    = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980235},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  PAGES     = {1--11},
  EID       = {162},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8321-6 %R 10.1145/2980179.2980235 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 6 %& 1 %P 1 - 11 %Z sequence number: 162 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016e. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. Computer Vision -- ECCV 2016, Springer.
Export
BibTeX
@inproceedings{RhodinECCV2016,
  TITLE     = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues},
  AUTHOR    = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-3-319-46453-4},
  DOI       = {10.1007/978-3-319-46454-1_31},
  PUBLISHER = {Springer},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {Computer Vision -- ECCV 2016},
  EDITOR    = {Leibe, Bastian and Matas, Jiri and Sebe, Nicu and Welling, Max},
  PAGES     = {509--526},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {9909},
  ADDRESS   = {Amsterdam, The Netherlands},
}
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-986D-F %R 10.1007/978-3-319-46454-1_31 %D 2016 %B 14th European Conference on Computer Vision %Z date of event: 2016-10-11 - 2016-10-14 %C Amsterdam, The Netherlands %B Computer Vision -- ECCV 2016 %E Leibe, Bastian; Matas, Jiri; Sebe, Nicu; Welling, Max %P 509 - 526 %I Springer %@ 978-3-319-46453-4 %B Lecture Notes in Computer Science %N 9909 %U https://rdcu.be/dLgG6
Reinert, B., Kopf, J., Ritschel, T., Cuervo, E., Chu, D., and Seidel, H.-P. 2016a. Proxy-guided Image-based Rendering for Mobile Devices. Computer Graphics Forum (Proc. Pacific Graphics 2016) 35, 7.
Export
BibTeX
@article{ReinertPG2016,
  TITLE     = {Proxy-guided Image-based Rendering for Mobile Devices},
  AUTHOR    = {Reinert, Bernhard and Kopf, Johannes and Ritschel, Tobias and Cuervo, Eduardo and Chu, David and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.13032},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {35},
  NUMBER    = {7},
  PAGES     = {353--362},
  BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications Short Papers Proceedings (Pacific Graphics 2016)},
}
Endnote
%0 Journal Article %A Reinert, Bernhard %A Kopf, Johannes %A Ritschel, Tobias %A Cuervo, Eduardo %A Chu, David %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Proxy-guided Image-based Rendering for Mobile Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2DD8-7 %R 10.1111/cgf.13032 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 7 %& 353 %P 353 - 362 %I Blackwell-Wiley %C Oxford %@ false %B The 24th Pacific Conference on Computer Graphics and Applications Short Papers Proceedings %O Pacific Graphics 2016 PG 2016
Reinert, B., Ritschel, T., Seidel, H.-P., and Georgiev, I. 2016b. Projective Blue-Noise Sampling. Computer Graphics Forum 35, 1.
Export
BibTeX
@article{ReinertCGF2016,
  TITLE     = {Projective Blue-Noise Sampling},
  AUTHOR    = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter and Georgiev, Iliyan},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12725},
  PUBLISHER = {Wiley},
  ADDRESS   = {Chichester},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {35},
  NUMBER    = {1},
  PAGES     = {285--295},
}
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %A Georgiev, Iliyan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Projective Blue-Noise Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-1A31-D %R 10.1111/cgf.12725 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 1 %& 285 %P 285 - 295 %I Wiley %C Chichester %@ false
Reinert, B., Ritschel, T., and Seidel, H.-P. 2016c. Animated 3D Creatures from Single-view Video by Skeletal Sketching. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Reinert:2016:AnimatedCreatures,
  TITLE     = {Animated {3D} Creatures from Single-view Video by Skeletal Sketching},
  AUTHOR    = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-9947868-1-4},
  DOI       = {10.20380/GI2016.17},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference},
  EDITOR    = {Popa, Tiberiu and Moffatt, Karyn},
  PAGES     = {133--143},
  ADDRESS   = {Victoria, BC, Canada},
}
Endnote
%0 Conference Proceedings %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Animated 3D Creatures from Single-view Video by Skeletal Sketching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64EC-7 %R 10.20380/GI2016.17 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, BC, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 133 - 143 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2016. Deep Shading: Convolutional Neural Networks for Screen-Space Shading. http://arxiv.org/abs/1603.06078.
(arXiv: 1603.06078)
Abstract
In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.
Export
BibTeX
@online{NalbacharXiv2016,
  TITLE      = {Deep Shading: Convolutional Neural Networks for Screen-Space Shading},
  AUTHOR     = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1603.06078},
  EPRINT     = {1603.06078},
  EPRINTTYPE = {arXiv},
  YEAR       = {2016},
  ABSTRACT   = {In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.},
}
Endnote
%0 Report %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen-Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0174-4 %U http://arxiv.org/abs/1603.06078 %D 2016 %X In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images. %K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
(Best Student Paper Award)
Export
BibTeX
@inproceedings{LeimkuehlerGI2016,
  TITLE     = {Perceptual Real-Time {2D}-to-{3D} Conversion Using Cue Fusion},
  AUTHOR    = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-9947868-1-4},
  DOI       = {10.20380/GI2016.02},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference},
  EDITOR    = {Popa, Tiberiu and Moffatt, Karyn},
  PAGES     = {5--12},
  ADDRESS   = {Victoria, Canada},
}
Endnote
%0 Conference Proceedings %A Leimk&#252;hler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-823D-1 %R 10.20380/GI2016.02 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 5 - 12 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Kellnhofer, P., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2016a. GazeStereo3D: Seamless Disparity Manipulations. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016)35, 4.
Export
BibTeX
@article{KellnhoferSIGGRAPH2016,
  TITLE     = {{GazeStereo3D}: {S}eamless Disparity Manipulations},
  AUTHOR    = {Kellnhofer, Petr and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2897824.2925866},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {35},
  NUMBER    = {4},
  PAGES     = {1--13},
  EID       = {68},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T GazeStereo3D: Seamless Disparity Manipulations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0190-4 %R 10.1145/2897824.2925866 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %& 1 %P 1 - 13 %Z sequence number: 68 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016b. Transformation-aware Perceptual Image Metric. Journal of Electronic Imaging25, 5.
Export
BibTeX
@article{Kellnhofer2016jei,
  TITLE     = {Transformation-aware Perceptual Image Metric},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1017-9909},
  DOI       = {10.1117/1.JEI.25.5.053014},
  PUBLISHER = {SPIE},
  ADDRESS   = {Bellingham, WA},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {Journal of Electronic Imaging},
  VOLUME    = {25},
  NUMBER    = {5},
  PAGES     = {1--16},
  EID       = {053014},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B3-4 %R 10.1117/1.JEI.25.5.053014 %7 2016 %D 2016 %J Journal of Electronic Imaging %V 25 %N 5 %& 1 %P 1 - 16 %Z sequence number: 053014 %I SPIE %C Bellingham, WA %@ false
Kellnhofer, P., Didyk, P., Ritschel, T., Masia, B., Myszkowski, K., and Seidel, H.-P. 2016c. Motion Parallax in Stereo 3D: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016)35, 6.
Export
BibTeX
@article{Kellnhofer2016SGA,
  TITLE     = {Motion Parallax in Stereo {3D}: {M}odel and Applications},
  AUTHOR    = {Kellnhofer, Petr and Didyk, Piotr and Ritschel, Tobias and Masia, Belen and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980230},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  PAGES     = {1--12},
  EID       = {176},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Ritschel, Tobias %A Masia, Belen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Parallax in Stereo 3D: Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B6-D %R 10.1145/2980179.2980230 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %& 1 %P 1 - 12 %Z sequence number: 176 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Gryaditskaya, Y., Masia, B., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Gloss Editing in Light Fields. VMV 2016 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{jgryadit2016,
  TITLE     = {Gloss Editing in Light Fields},
  AUTHOR    = {Gryaditskaya, Yulia and Masia, Belen and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-03868-025-3},
  DOI       = {10.2312/vmv.20161351},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {VMV 2016 Vision, Modeling and Visualization},
  EDITOR    = {Hullin, Matthias and Stamminger, Marc and Weinkauf, Tino},
  PAGES     = {127--135},
  ADDRESS   = {Bayreuth, Germany},
}
Endnote
%0 Conference Proceedings %A Gryaditskaya, Yulia %A Masia, Belen %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gloss Editing in Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C5-B %R 10.2312/vmv.20161351 %D 2016 %B 21st International Symposium on Vision, Modeling and Visualization %Z date of event: 2016-10-10 - 2016-10-12 %C Bayreuth, Germany %B VMV 2016 Vision, Modeling and Visualization %E Hullin, Matthias; Stamminger, Marc; Weinkauf, Tino %P 127 - 135 %I Eurographics Association %@ 978-3-03868-025-3
Dąbała, Ł., Ziegler, M., Didyk, P., et al. 2016. Efficient Multi-image Correspondences for On-line Light Field Video Processing. Computer Graphics Forum (Proc. Pacific Graphics 2016)35, 7.
Export
BibTeX
@article{DabalaPG2016,
  TITLE     = {Efficient Multi-image Correspondences for On-line Light Field Video Processing},
  AUTHOR    = {D{\k{a}}ba{\l}a, {\L}ukasz and Ziegler, Matthias and Didyk, Piotr and Zilly, Frederik and Keinert, Joachim and Myszkowski, Karol and Rokita, Przemyslaw and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.13037},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {35},
  NUMBER    = {7},
  PAGES     = {401--410},
  BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2016)},
}
Endnote
%0 Journal Article %A D&#261;ba&#322;a, &#321;ukasz %A Ziegler, Matthias %A Didyk, Piotr %A Zilly, Frederik %A Keinert, Joachim %A Myszkowski, Karol %A Rokita, Przemyslaw %A Ritschel, Tobias %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Multi-image Correspondences for On-line Light Field Video Processing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82BA-5 %R 10.1111/cgf.13037 %7 2016 %D 2016 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 35 %N 7 %& 401 %P 401 - 410 %I Blackwell-Wiley %C Oxford %@ false %B The 24th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2016 PG 2016
Boechat, P., Dokter, M., Kenzel, M., Seidel, H.-P., Schmalstieg, D., and Steinberger, M. 2016. Representing and Scheduling Procedural Generation using Operator Graphs. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016)35, 6.
Export
BibTeX
@article{BoaechatSIGGRAPHAsia2016,
  TITLE     = {Representing and Scheduling Procedural Generation using Operator Graphs},
  AUTHOR    = {Boechat, Pedro and Dokter, Mark and Kenzel, Michael and Seidel, Hans-Peter and Schmalstieg, Dieter and Steinberger, Markus},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980227},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  PAGES     = {1--12},
  EID       = {183},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Boechat, Pedro %A Dokter, Mark %A Kenzel, Michael %A Seidel, Hans-Peter %A Schmalstieg, Dieter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Representing and Scheduling Procedural Generation using Operator Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98BB-0 %R 10.1145/2980179.2980227 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %& 1 %P 1 - 12 %Z sequence number: 183 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
2015
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2015. Hierarchical Hashing for Pattern Search in 3D Vector Fields. VMV 2015 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{WangVMV2015,
  TITLE     = {Hierarchical Hashing for Pattern Search in {3D} Vector Fields},
  AUTHOR    = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-95-8},
  DOI       = {10.2312/vmv.20151256},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {VMV 2015 Vision, Modeling and Visualization},
  EDITOR    = {Bommes, David and Ritschel, Tobias and Schultz, Thomas},
  PAGES     = {41--48},
  ADDRESS   = {Aachen, Germany},
}
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Hierarchical Hashing for Pattern Search in 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-F760-4 %R 10.2312/vmv.20151256 %D 2015 %B 20th International Symposium on Vision, Modeling and Visualization %Z date of event: 2015-10-07 - 2015-10-10 %C Aachen, Germany %B VMV 2015 Vision, Modeling and Visualization %E Bommes, David; Ritschel, Tobias; Schultz, Thomas %P 41 - 48 %I Eurographics Association %@ 978-3-905674-95-8
Von Tycowicz, C., Schulz, C., Seidel, H.-P., and Hildebrandt, K. 2015. Real-time Nonlinear Shape Interpolation. ACM Transactions on Graphics34, 3.
Export
BibTeX
@article{Tycowicz2015,
  TITLE     = {Real-time Nonlinear Shape Interpolation},
  AUTHOR    = {von Tycowicz, Christoph and Schulz, Christian and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2729972},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {34},
  NUMBER    = {3},
  PAGES     = {1--10},
  EID       = {34},
}
Endnote
%0 Journal Article %A von Tycowicz, Christoph %A Schulz, Christian %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Nonlinear Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D65-9 %R 10.1145/2729972 %7 2015 %D 2015 %J ACM Transactions on Graphics %V 34 %N 3 %& 1 %P 1 - 10 %Z sequence number: 34 %I Association for Computing Machinery %C New York, NY %@ false
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2015. Animating Articulated Characters Using Wiggly Splines. Proceedings SCA 2015, ACM.
Export
BibTeX
@inproceedings{SchulzSCA2015,
  TITLE     = {Animating Articulated Characters Using Wiggly Splines},
  AUTHOR    = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-3496-9},
  DOI       = {10.1145/2786784.2786799},
  PUBLISHER = {ACM},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Proceedings SCA 2015},
  PAGES     = {101--109},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Animating Articulated Characters Using Wiggly Splines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8EA3-0 %R 10.1145/2786784.2786799 %D 2015 %B 14th ACM SIGGRAPH / Eurographics Symposium on Computer Animation %Z date of event: 2015-08-07 - 2015-08-09 %C Los Angeles, CA, USA %B Proceedings SCA 2015 %P 101 - 109 %I ACM %@ 978-1-4503-3496-9
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2015a. A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation. ICCV 2015, IEEE International Conference on Computer Vision, IEEE.
Export
BibTeX
@inproceedings{RhodinICCV2015,
  TITLE     = {A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation},
  AUTHOR    = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4673-8390-5},
  DOI       = {10.1109/ICCV.2015.94},
  PUBLISHER = {IEEE},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {ICCV 2015, IEEE International Conference on Computer Vision},
  PAGES     = {765--773},
  ADDRESS   = {Santiago, Chile},
}
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-52DC-4 %R 10.1109/ICCV.2015.94 %D 2015 %B IEEE International Conference on Computer Vision %Z date of event: 2015-12-07 - 2015-12-13 %C Santiago, Chile %B ICCV 2015 %P 765 - 773 %I IEEE %@ 978-1-4673-8390-5 %U http://www.cv-foundation.org/openaccess/content_iccv_2015/html/Rhodin_A_Versatile_Scene_ICCV_2015_paper.html
Rhodin, H., Tompkin, J., Kim, K.I., et al. 2015b. Generalizing Wave Gestures from Sparse Examples for Real-time Character Control. ACM Transactions on Graphics34, 6.
Export
BibTeX
@article{DBLP:journals/tog/RhodinTKAPST15,
  TITLE         = {Generalizing Wave Gestures from Sparse Examples for Real-time Character Control},
  AUTHOR        = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and de Aguiar, Edilson and Pfister, Hanspeter and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE      = {eng},
  ISSN          = {0730-0301},
  DOI           = {10.1145/2816795.2818082},
  PUBLISHER     = {Association for Computing Machinery},
  ADDRESS       = {New York, NY},
  YEAR          = {2015},
  DATE          = {2015},
  JOURNAL       = {ACM Transactions on Graphics},
  VOLUME        = {34},
  NUMBER        = {6},
  PAGES         = {181:1--181:12},
  internal-note = {Duplicate of entry RhodinSAP2015 (identical title, authors, and DOI); key retained in case it is already cited -- consolidate when possible},
}
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A de Aguiar, Edilson %A Pfister, Hanspeter %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generalizing Wave Gestures from Sparse Examples for Real-time Character Control : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7285-A %R 10.1145/2816795.2818082 %D 2015 %J ACM Transactions on Graphics %V 34 %N 6 %& 181:1 %P 181:1 - 181:12 %I Association for Computing Machinery %C New York, NY %@ false
Rhodin, H., Tompkin, J., Kim, K.I., et al. 2015c. Generalizing Wave Gestures from Sparse Examples for Real-time Character Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2015)34, 6.
Export
BibTeX
@article{RhodinSAP2015,
  TITLE     = {Generalizing Wave Gestures from Sparse Examples for Real-time Character Control},
  AUTHOR    = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and de Aguiar, Edilson and Pfister, Hanspeter and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2816795.2818082},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {34},
  NUMBER    = {6},
  PAGES     = {1--12},
  EID       = {181},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2015},
}
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A de Aguiar, Edilson %A Pfister, Hanspeter %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generalizing Wave Gestures from Sparse Examples for Real-time Character Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-2476-8 %R 10.1145/2816795.2818082 %7 2015 %D 2015 %J ACM Transactions on Graphics %O TOG %V 34 %N 6 %& 1 %P 1 - 12 %Z sequence number: 181 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2015 %O ACM SIGGRAPH Asia 2015 Kobe, Japan
Nguyen, C., Ritschel, T., and Seidel, H.-P. 2015a. Data-driven Color Manifolds. ACM Transactions on Graphics34, 2.
Export
BibTeX
@article{NguyenTOG2015,
  TITLE     = {Data-driven Color Manifolds},
  AUTHOR    = {Nguyen, Chuong and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2699645},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {34},
  NUMBER    = {2},
  PAGES     = {1--9},
  EID       = {20},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Data-driven Color Manifolds : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-680A-D %R 10.1145/2699645 %7 2015 %D 2015 %J ACM Transactions on Graphics %V 34 %N 2 %& 1 %P 1 - 9 %Z sequence number: 20 %I Association for Computing Machinery %C New York, NY %@ false
Nguyen, C., Nalbach, O., Ritschel, T., and Seidel, H.-P. 2015b. Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections. Computer Graphics Forum (Proc. EUROGRAPHICS 2015)34, 2.
Export
BibTeX
@article{NguyenEG2015,
  TITLE     = {Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections},
  AUTHOR    = {Nguyen, Chuong and Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12548},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {34},
  NUMBER    = {2},
  PAGES     = {143--154},
  BOOKTITLE = {The 36th Annual Conference of the European Association of Computer Graphics (EUROGRAPHICS 2015)},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D6A-0 %R 10.1111/cgf.12548 %7 2015 %D 2015 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 34 %N 2 %& 143 %P 143 - 154 %I Blackwell-Wiley %C Oxford %@ false %B The 36th Annual Conference of the European Association of Computer Graphics %O EUROGRAPHICS 2015 4th &#8211; 8th May 2015, Kongresshaus in Z&#252;rich, Switzerland EG 2015
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2015. The Bounced Z-buffer for Indirect Visibility. VMV 2015 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{NalbachVMV2015,
  TITLE     = {The Bounced {Z}-buffer for Indirect Visibility},
  AUTHOR    = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-95-8},
  DOI       = {10.2312/vmv.20151261},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {VMV 2015 Vision, Modeling and Visualization},
  EDITOR    = {Bommes, David and Ritschel, Tobias and Schultz, Thomas},
  PAGES     = {79--86},
  ADDRESS   = {Aachen, Germany},
}
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T The Bounced Z-buffer for Indirect Visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-F762-F %R 10.2312/vmv.20151261 %D 2015 %B 20th International Symposium on Vision, Modeling and Visualization %Z date of event: 2015-10-07 - 2015-10-10 %C Aachen, Germany %B VMV 2015 Vision, Modeling and Visualization %E Bommes, David; Ritschel, Tobias; Schultz, Thomas %P 79 - 86 %I Eurographics Association %@ 978-3-905674-95-8
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2015. High Dynamic Range Imaging. In: Wiley Encyclopedia of Electrical and Electronics Engineering. Wiley, New York, NY.
Export
BibTeX
@incollection{MantiukEncyclopedia2015,
  TITLE     = {High Dynamic Range Imaging},
  AUTHOR    = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1002/047134608X.W8265},
  PUBLISHER = {Wiley},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Wiley Encyclopedia of Electrical and Electronics Engineering},
  EDITOR    = {Webster, John G.},
  PAGES     = {1--42},
}
Endnote
%0 Book Section %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-A376-B %R 10.1002/047134608X.W8265 %D 2015 %8 15.06.2015 %B Wiley Encyclopedia of Electrical and Electronics Engineering %E Webster, John G. %P 1 - 42 %I Wiley %C New York, NY
Li, C., Wand, M., Wu, X., and Seidel, H.-P. 2015. Approximate 3D Partial Symmetry Detection Using Co-occurrence Analysis. International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Li3DV2015,
  TITLE     = {Approximate {3D} Partial Symmetry Detection Using Co-occurrence Analysis},
  AUTHOR    = {Li, Chuan and Wand, Michael and Wu, Xiaokun and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4673-8333-2},
  DOI       = {10.1109/3DV.2015.55},
  PUBLISHER = {IEEE},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {International Conference on 3D Vision},
  EDITOR    = {Brown, Michael and Kosecka, Jana and Theobalt, Christian},
  PAGES     = {425--433},
  ADDRESS   = {Lyon, France},
}
Endnote
%0 Conference Proceedings %A Li, Chuan %A Wand, Michael %A Wu, Xiaokun %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Approximate 3D Partial Symmetry Detection Using Co-occurrence Analysis : %U http://hdl.handle.net/11858/00-001M-0000-002B-34D8-0 %R 10.1109/3DV.2015.55 %D 2015 %B International Conference on 3D Vision %Z date of event: 2015-10-19 - 2015-10-22 %C Lyon, France %B International Conference on 3D Vision %E Brown, Michael; Kosecka, Jana; Theobalt, Christian %P 425 - 433 %I IEEE %@ 978-1-4673-8333-2
Klehm, O., Kol, T.R., Seidel, H.-P., and Eisemann, E. 2015. Stylized Scattering via Transfer Functions and Occluder Manipulation. Graphics Interface 2015, Graphics Interface Conference 2015, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{KlehmGI2015,
  TITLE     = {Stylized Scattering via Transfer Functions and Occluder Manipulation},
  AUTHOR    = {Klehm, Oliver and Kol, Timothy R. and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISBN      = {978-0-9947868-0-7},
  DOI       = {10.20380/GI2015.15},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Graphics Interface 2015, Graphics Interface Conference 2015},
  EDITOR    = {Zhang, Hao Richard and Tang, Tony},
  PAGES     = {115--121},
  ADDRESS   = {Halifax, Canada},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Kol, Timothy R. %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Stylized Scattering via Transfer Functions and Occluder Manipulation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-D415-8 %R 10.20380/GI2015.15 %D 2015 %B Graphics Interface Conference 2015 %Z date of event: 2015-06-03 - 2015-06-05 %C Halifax, Canada %B Graphics Interface 2015 %E Zhang, Hao Richard; Tang, Tony %P 115 - 121 %I Canadian Information Processing Society %@ 978-0-9947868-0-7
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015a. A Transformation-aware Perceptual Image Metric. Human Vision and Electronic Imaging XX (HVEI 2015), SPIE/IS&T.
(Best Student Paper Award)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
Export
BibTeX
@inproceedings{Kellnhofer2015,
  TITLE     = {A Transformation-aware Perceptual Image Metric},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {9781628414844},
  DOI       = {10.1117/12.2076754},
  PUBLISHER = {SPIE/IS\&T},
  YEAR      = {2015},
  DATE      = {2015},
  ABSTRACT  = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.},
  BOOKTITLE = {Human Vision and Electronic Imaging XX (HVEI 2015)},
  EDITOR    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib},
  EID       = {939408},
  SERIES    = {Proceedings of SPIE},
  VOLUME    = {9394},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-544A-4 %R 10.1117/12.2076754 %D 2015 %B Human Vision and Electronic Imaging XX %Z date of event: 2015-02-08 - 2015-02-12 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. 
%B Human Vision and Electronic Imaging XX %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %Z sequence number: 939408 %I SPIE/IS&T %@ 9781628414844 %B Proceedings of SPIE %N 9394
Kellnhofer, P., Leimkühler, T., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015b. What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? Proceedings SAP 2015, ACM.
(Best Presentation Award)
Export
BibTeX
@inproceedings{Kellnhofer2015SAP,
  TITLE     = {What Makes {2D}-to-{3D} Stereo Conversion Perceptually Plausible?},
  AUTHOR    = {Kellnhofer, Petr and Leimk{\"u}hler, Thomas and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-3812-7},
  DOI       = {10.1145/2804408.2804409},
  PUBLISHER = {ACM},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Proceedings SAP 2015},
  PAGES     = {59--66},
  ADDRESS   = {T{\"u}bingen, Germany},
}
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Leimk&#252;hler, Thomas %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? : %U http://hdl.handle.net/11858/00-001M-0000-0029-2460-7 %R 10.1145/2804408.2804409 %D 2015 %B ACM SIGGRAPH Symposium on Applied Perception %Z date of event: 2015-09-13 - 2015-09-14 %C T&#252;bingen, Germany %B Proceedings SAP 2015 %P 59 - 66 %I ACM %@ 978-1-4503-3812-7 %U http://resources.mpi-inf.mpg.de/StereoCueFusion/WhatMakes3D/
Kellnhofer, P., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2015c. Modeling Luminance Perception at Absolute Threshold. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015) 34, 4.
Export
BibTeX
@article{Kellnhofer2015a,
  TITLE     = {Modeling Luminance Perception at Absolute Threshold},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12687},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME    = {34},
  NUMBER    = {4},
  PAGES     = {155--164},
  BOOKTITLE = {Eurographics Symposium on Rendering 2015},
  EDITOR    = {Lehtinen, Jaakko and Nowrouzezahrai, Derek},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling Luminance Perception at Absolute Threshold : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E8D-4 %R 10.1111/cgf.12687 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 155 %P 155 - 164 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2014 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Jain, A., Chen, C., Thormählen, T., Metaxas, D., and Seidel, H.-P. 2015. Multi-layer Stencil Creation from Images. Computers and Graphics 48.
Export
BibTeX
@article{JainMulti-layer2015,
  TITLE     = {Multi-layer Stencil Creation from Images},
  AUTHOR    = {Jain, Arjun and Chen, Chao and Thorm{\"a}hlen, Thorsten and Metaxas, Dimitris and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2015.02.003},
  PUBLISHER = {Pergamon},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computers and Graphics},
  VOLUME    = {48},
  PAGES     = {11--22},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Chen, Chao %A Thorm&#228;hlen, Thorsten %A Metaxas, Dimitris %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-layer Stencil Creation from Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-9C34-A %R 10.1016/j.cag.2015.02.003 %7 2015-02-26 %D 2015 %J Computers and Graphics %V 48 %& 11 %P 11 - 22 %I Pergamon %C New York, NY %@ false
Herzog, R., Mewes, D., Wand, M., Guibas, L., and Seidel, H.-P. 2015. LeSSS: Learned Shared Semantic Spaces for Relating Multi-modal Representations of 3D Shapes. Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing 2015) 34, 5.
Export
BibTeX
@article{HerzogSGP2015,
  TITLE     = {{LeSSS}: {Learned} {Shared} {Semantic} {Spaces} for Relating Multi-Modal Representations of {3D} Shapes},
  AUTHOR    = {Herzog, Robert and Mewes, Daniel and Wand, Michael and Guibas, Leonidas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12703},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Chichester},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing)},
  VOLUME    = {34},
  NUMBER    = {5},
  PAGES     = {141--151},
  BOOKTITLE = {Symposium on Geometry Processing 2015 (Eurographics Symposium on Geometric Processing 2015)},
  EDITOR    = {Ben-Chen, Mirela and Liu, Ligang},
}
Endnote
%0 Journal Article %A Herzog, Robert %A Mewes, Daniel %A Wand, Michael %A Guibas, Leonidas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T LeSSS: Learned Shared Semantic Spaces for Relating Multi-modal Representations of 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E9A-6 %R 10.1111/cgf.12703 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 5 %& 141 %P 141 - 151 %I Wiley-Blackwell %C Chichester %@ false %B Symposium on Geometry Processing 2015 %O Graz, Austria, July 6 - 8, 2015 SGP 2015 Eurographics Symposium on Geometric Processing 2015
Gryaditskaya, Y., Pouli, T., Reinhard, E., Myszkowski, K., and Seidel, H.-P. 2015. Motion Aware Exposure Bracketing for HDR Video. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015) 34, 4.
Export
BibTeX
@article{Gryaditskaya2015,
  TITLE     = {Motion Aware Exposure Bracketing for {HDR} Video},
  AUTHOR    = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12684},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME    = {34},
  NUMBER    = {4},
  PAGES     = {119--130},
  BOOKTITLE = {Eurographics Symposium on Rendering 2015},
  EDITOR    = {Lehtinen, Jaakko and Nowrouzezahrai, Derek},
}
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Aware Exposure Bracketing for HDR Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-15D2-B %R 10.1111/cgf.12684 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 119 %P 119 - 130 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Brandt, C., Seidel, H.-P., and Hildebrandt, K. 2015. Optimal Spline Approximation via ℓ₀-Minimization. Computer Graphics Forum (Proc. EUROGRAPHICS 2015) 34, 2.
Export
BibTeX
@article{Brandt2015,
  TITLE     = {Optimal Spline Approximation via $\ell_0$-Minimization},
  AUTHOR    = {Brandt, Christopher and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12589},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {34},
  NUMBER    = {2},
  PAGES     = {617--626},
  BOOKTITLE = {The 36th Annual Conference of the European Association of Computer Graphics (EUROGRAPHICS 2015)},
}
Endnote
%0 Journal Article %A Brandt, Christopher %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimal Spline Approximation via &#8467;&#8320;-Minimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D67-5 %R 10.1111/cgf.12589 %7 2015 %D 2015 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 34 %N 2 %& 617 %P 617 - 626 %I Blackwell-Wiley %C Oxford %@ false %B The 36th Annual Conference of the European Association of Computer Graphics %O EUROGRAPHICS 2015 4th - 8th May 2015, Kongresshaus in Z&#252;rich, Switzerland
Arpa, S., Ritschel, T., Myszkowski, K., Çapin, T., and Seidel, H.-P. 2015. Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image. Computer Graphics Forum 34, 1.
Export
BibTeX
@article{arpa2014purkinje,
  TITLE     = {Purkinje Images: {Conveying} Different Content for Different Luminance Adaptations in a Single Image},
  AUTHOR    = {Arpa, Sami and Ritschel, Tobias and Myszkowski, Karol and {\c{C}}apin, Tolga and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12463},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {34},
  NUMBER    = {1},
  PAGES     = {116--126},
}
Endnote
%0 Journal Article %A Arpa, Sami %A Ritschel, Tobias %A Myszkowski, Karol %A &#199;apin, Tolga %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D0B-6 %R 10.1111/cgf.12463 %7 2014-10-18 %D 2015 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 34 %N 1 %& 116 %P 116 - 126 %I Blackwell-Wiley %C Oxford %@ false
2014
Wu, X., Li, C., Wand, M., Hildebrandt, K., Jansen, S., and Seidel, H.-P. 2014a. 3D Model Retargeting Using Offset Statistics. 2nd International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Wu2014a,
  TITLE     = {{3D} Model Retargeting Using Offset Statistics},
  AUTHOR    = {Wu, Xiaokun and Li, Chuan and Wand, Michael and Hildebrandt, Klaus and Jansen, Silke and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4799-7000-1},
  DOI       = {10.1109/3DV.2014.74},
  PUBLISHER = {IEEE},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {2nd International Conference on 3D Vision},
  PAGES     = {353--360},
  ADDRESS   = {Tokyo, Japan},
}
Endnote
%0 Conference Proceedings %A Wu, Xiaokun %A Li, Chuan %A Wand, Michael %A Hildebrandt, Klaus %A Jansen, Silke %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Model Retargeting Using Offset Statistics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D63-D %R 10.1109/3DV.2014.74 %D 2014 %B 2nd International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %B 2nd International Conference on 3D Vision %P 353 - 360 %I IEEE %@ 978-1-4799-7000-1
Wu, X., Wand, M., Hildebrandt, K., Kohli, P., and Seidel, H.-P. 2014b. Real-time Symmetry-preserving Deformation. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Export
BibTeX
@article{Wu2014,
  TITLE     = {Real-time Symmetry-preserving Deformation},
  AUTHOR    = {Wu, Xiaokun and Wand, Michael and Hildebrandt, Klaus and Kohli, Pushmeet and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1111/cgf.12491},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {33},
  NUMBER    = {7},
  PAGES     = {229--238},
  BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)},
}
Endnote
%0 Journal Article %A Wu, Xiaokun %A Wand, Michael %A Hildebrandt, Klaus %A Kohli, Pushmeet %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Symmetry-preserving Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3D08-5 %R 10.1111/cgf.12491 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 229 %P 229 - 238 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2014. Pattern Search in Flows based on Similarity of Stream Line Segments. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Abstract
We propose a method that allows users to define flow features in form<br> of patterns represented as sparse sets of stream line segments. Our<br> approach finds similar occurrences in the same or other time steps.<br> Related approaches define patterns using dense, local stencils or<br> support only single segments. Our patterns are defined sparsely and<br> can have a significant extent, i.e., they are integration-based and<br> not local. This allows for a greater flexibility in defining features<br> of interest. Similarity is measured using intrinsic curve properties<br> only, which enables invariance to location, orientation, and scale.<br> Our method starts with splitting stream lines using globally-consistent<br> segmentation criteria. It strives to maintain the visually apparent<br> features of the flow as a collection of stream line segments. Most<br> importantly, it provides similar segmentations for similar flow structures.<br> For user-defined patterns of curve segments, our algorithm finds<br> similar ones that are invariant to similarity transformations. We<br> showcase the utility of our method using different 2D and 3D flow<br> fields.
Export
BibTeX
@inproceedings{wang14,
  TITLE     = {Pattern Search in Flows based on Similarity of Stream Line Segments},
  AUTHOR    = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  DOI       = {10.2312/vmv.20141272},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2014},
  DATE      = {2014-10},
  ABSTRACT  = {We propose a method that allows users to define flow features in form of patterns represented as sparse sets of stream line segments. Our approach finds similar occurrences in the same or other time steps. Related approaches define patterns using dense, local stencils or support only single segments. Our patterns are defined sparsely and can have a significant extent, i.e., they are integration-based and not local. This allows for a greater flexibility in defining features of interest. Similarity is measured using intrinsic curve properties only, which enables invariance to location, orientation, and scale. Our method starts with splitting stream lines using globally-consistent segmentation criteria. It strives to maintain the visually apparent features of the flow as a collection of stream line segments. Most importantly, it provides similar segmentations for similar flow structures. For user-defined patterns of curve segments, our algorithm finds similar ones that are invariant to similarity transformations. We showcase the utility of our method using different 2D and 3D flow fields.},
  BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization},
  EDITOR    = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp},
  PAGES     = {23--30},
  ADDRESS   = {Darmstadt, Germany},
}
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Pattern Search in Flows based on Similarity of Stream Line Segments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5337-3 %R 10.2312/vmv.20141272 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %X We propose a method that allows users to define flow features in form<br> of patterns represented as sparse sets of stream line segments. Our<br> approach finds similar occurrences in the same or other time steps.<br> Related approaches define patterns using dense, local stencils or<br> support only single segments. Our patterns are defined sparsely and<br> can have a significant extent, i.e., they are integration-based and<br> not local. This allows for a greater flexibility in defining features<br> of interest. Similarity is measured using intrinsic curve properties<br> only, which enables invariance to location, orientation, and scale.<br> Our method starts with splitting stream lines using globally-consistent<br> segmentation criteria. It strives to maintain the visually apparent<br> features of the flow as a collection of stream line segments. Most<br> importantly, it provides similar segmentations for similar flow structures.<br> For user-defined patterns of curve segments, our algorithm finds<br> similar ones that are invariant to similarity transformations. We<br> showcase the utility of our method using different 2D and 3D flow<br> fields. 
%B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 23 - 30 %I Eurographics Association %U http://tinoweinkauf.net/
Vangorp, P., Mantiuk, R., Bazyluk, B., et al. 2014. Depth from HDR: Depth Induction or Increased Realism? SAP 2014, ACM Symposium on Applied Perception, ACM.
Export
BibTeX
@inproceedings{Vangorp2014,
  TITLE     = {Depth from {HDR}: {Depth} Induction or Increased Realism?},
  AUTHOR    = {Vangorp, Peter and Mantiuk, Rafal and Bazyluk, Bartosz and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Watt, Simon J. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-3009-1},
  DOI       = {10.1145/2628257.2628258},
  PUBLISHER = {ACM},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {SAP 2014, ACM Symposium on Applied Perception},
  EDITOR    = {Bailey, Reynold and Kuhl, Scott},
  PAGES     = {71--78},
  ADDRESS   = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings %A Vangorp, Peter %A Mantiuk, Rafal %A Bazyluk, Bartosz %A Myszkowski, Karol %A Mantiuk, Rados\law %A Watt, Simon J. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Depth from HDR: Depth Induction or Increased Realism? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-34DB-5 %R 10.1145/2628257.2628258 %D 2014 %B ACM Symposium on Applied Perception %Z date of event: 2014-08-08 - 2014-08-09 %C Vancouver, Canada %K binocular disparity, contrast, luminance, stereo 3D %B SAP 2014 %E Bailey, Reynold; Kuhl, Scott %P 71 - 78 %I ACM %@ 978-1-4503-3009-1
Tevs, A., Huang, Q., Wand, M., Seidel, H.-P., and Guibas, L. 2014. Relating Shapes via Geometric Symmetries and Regularities. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014) 33, 4.
Export
BibTeX
@article{TevsSIGGRAPH2014,
  TITLE     = {Relating Shapes via Geometric Symmetries and Regularities},
  AUTHOR    = {Tevs, Art and Huang, Qixing and Wand, Michael and Seidel, Hans-Peter and Guibas, Leonidas},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2601097.2601220},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {33},
  NUMBER    = {4},
  PAGES     = {1--12},
  EID       = {119},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014},
}
Endnote
%0 Journal Article %A Tevs, Art %A Huang, Qixing %A Wand, Michael %A Seidel, Hans-Peter %A Guibas, Leonidas %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Relating Shapes via Geometric Symmetries and Regularities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-8052-F %F ISI: 000340000100086 %R 10.1145/2601097.2601220 %7 2014-07 %D 2014 %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 12 %Z sequence number: 119 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O Vancouver, BC, Canada ACM SIGGRAPH 2014
Templin, K., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2014a. Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014) 33, 4.
Export
BibTeX
@article{Templin:2014:MOE:2601097.2601148,
  TITLE     = {Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts},
  AUTHOR    = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2601097.2601148},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {33},
  NUMBER    = {4},
  PAGES     = {1--8},
  EID       = {145},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014},
}
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE16-9 %R 10.1145/2601097.2601148 %7 2014 %D 2014 %K S3D, binocular, eye&#8208;tracking %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 8 %Z sequence number: 145 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2014b. Perceptually-motivated Stereoscopic Film Grain. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Export
BibTeX
@article{Templin2014b,
  TITLE     = {Perceptually-motivated Stereoscopic Film Grain},
  AUTHOR    = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1111/cgf.12503},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {33},
  NUMBER    = {7},
  PAGES     = {349--358},
  BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)},
}
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Stereoscopic Film Grain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF2-B %R 10.1111/cgf.12503 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 349 %P 349 - 358 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2014a. Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model. Proceedings of the 2nd International Conference on 3D Vision, IEEE explore.
(arXiv: 1602.03860)
Abstract
Real-time marker-less hand tracking is of increasing importance in<br>human-computer interaction. Robust and accurate tracking of arbitrary hand<br>motion is a challenging problem due to the many degrees of freedom, frequent<br>self-occlusions, fast motions, and uniform skin color. In this paper, we<br>propose a new approach that tracks the full skeleton motion of the hand from<br>multiple RGB cameras in real-time. The main contributions include a new<br>generative tracking method which employs an implicit hand shape representation<br>based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is<br>smooth and analytically differentiable making fast gradient based pose<br>optimization possible. This shape representation, together with a full<br>perspective projection model, enables more accurate hand modeling than a<br>related baseline method from literature. Our method achieves better accuracy<br>than previous methods and runs at 25 fps. We show these improvements both<br>qualitatively and quantitatively on publicly available datasets.<br>
Export
BibTeX
@inproceedings{Sridhar2016arXiv1602.03860,
  TITLE      = {Real-time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model},
  AUTHOR     = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian},
  LANGUAGE   = {eng},
  ISBN       = {978-1-4799-7000-1},
  URL        = {http://arxiv.org/abs/1602.03860},
  DOI        = {10.1109/3DV.2014.37},
  EPRINT     = {1602.03860},
  EPRINTTYPE = {arXiv},
  PUBLISHER  = {IEEE explore},
  YEAR       = {2014},
  DATE       = {2014},
  ABSTRACT   = {Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable making fast gradient based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets.},
  BOOKTITLE  = {Proceedings of the 2nd International Conference on 3D Vision},
  PAGES      = {319--326},
  ADDRESS    = {Tokyo, Japan},
}
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9878-6 %U http://arxiv.org/abs/1602.03860 %R 10.1109/3DV.2014.37 %D 2014 %B 2nd International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %X Real-time marker-less hand tracking is of increasing importance in<br>human-computer interaction. Robust and accurate tracking of arbitrary hand<br>motion is a challenging problem due to the many degrees of freedom, frequent<br>self-occlusions, fast motions, and uniform skin color. In this paper, we<br>propose a new approach that tracks the full skeleton motion of the hand from<br>multiple RGB cameras in real-time. The main contributions include a new<br>generative tracking method which employs an implicit hand shape representation<br>based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is<br>smooth and analytically differentiable making fast gradient based pose<br>optimization possible. This shape representation, together with a full<br>perspective projection model, enables more accurate hand modeling than a<br>related baseline method from literature. Our method achieves better accuracy<br>than previous methods and runs at 25 fps. We show these improvements both<br>qualitatively and quantitatively on publicly available datasets.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV %B Proceedings of the 2nd International Conference on 3D Vision %P 319 - 326 %I IEEE explore %@ 978-1-4799-7000-1
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2014b. Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model. 3DV 2014, International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{sridhar2014real,
  TITLE     = {Real-time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model},
  AUTHOR    = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4799-7001-8},
  DOI       = {10.1109/3DV.2014.37},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {3DV 2014, International Conference on 3D Vision},
  PAGES     = {319--326},
  ADDRESS   = {Tokyo, Japan},
}
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-69E9-F %R 10.1109/3DV.2014.37 %D 2014 %B International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %B 3DV 2014 %P 319 - 326 %I IEEE Computer Society %@ 978-1-4799-7001-8
Schulze, M., Martinez Esturo, J., Günther, T., et al. 2014. Sets of Globally Optimal Stream Surfaces for Flow Visualization. Computer Graphics Forum (Proc. EuroVis 2014)33, 3.
Export
BibTeX
@article{Schulze2014,
  TITLE     = {Sets of Globally Optimal Stream Surfaces for Flow Visualization},
  AUTHOR    = {Schulze, Maik and Martinez Esturo, Janick and G{\"u}nther, T. and R{\"o}ssl, Christian and Seidel, Hans-Peter and Weinkauf, Tino and Theisel, Holger},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12356},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {Computer Graphics Forum (Proc. EuroVis)},
  VOLUME    = {33},
  NUMBER    = {3},
  PAGES     = {1--10},
  BOOKTITLE = {Eurographics Conference on Visualization (EuroVis 2014)},
  EDITOR    = {Carr, Hamish and Rheingans, Penny and Schumann, Heidrun},
}
Endnote
%0 Journal Article %A Schulze, Maik %A Martinez Esturo, Janick %A G&#252;nther, T. %A R&#246;ssl, Christian %A Seidel, Hans-Peter %A Weinkauf, Tino %A Theisel, Holger %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Sets of Globally Optimal Stream Surfaces for Flow Visualization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-F518-1 %R 10.1111/cgf.12356 %7 2014-07-12 %D 2014 %K Categories and Subject Descriptors (according to ACM CCS), I.3.5 [Computer Graphics]: Computational Geometry and Object Modeling&#8212;Geometric algorithms, languages, and systems %J Computer Graphics Forum %V 33 %N 3 %& 1 %P 1 - 10 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Conference on Visualization %O EuroVis 2014 Swansea, Wales, UK, June 9 &#8211; 13, 2014
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2014a. Animating Deformable Objects Using Sparse Spacetime Constraints. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014)33, 4.
Export
BibTeX
@article{Schulz2014,
  TITLE     = {Animating Deformable Objects Using Sparse Spacetime Constraints},
  AUTHOR    = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2601097.2601156},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {33},
  NUMBER    = {4},
  PAGES     = {1--10},
  EID       = {109},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014},
}
Endnote
%0 Journal Article %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Animating Deformable Objects Using Sparse Spacetime Constraints : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE18-5 %R 10.1145/2601097.2601156 %7 2014 %D 2014 %K model reduction, optimal control, physically&#8208;based animation, spacetime constraints, wiggly splines %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 10 %Z sequence number: 109 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2014b. Proofs of two Theorems concerning Sparse Spacetime Constraints. https://arxiv.org/abs/1405.1902v1.
(arXiv: 1405.1902)
Abstract
In the SIGGRAPH 2014 paper [SvTSH14] an approach for animating deformable objects using sparse spacetime constraints is introduced. This report contains the proofs of two theorems presented in the paper.
Export
BibTeX
@online{Schulz-et-al_2014,
  TITLE      = {Proofs of two Theorems concerning Sparse Spacetime Constraints},
  AUTHOR     = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE   = {eng},
  URL        = {https://arxiv.org/abs/1405.1902v1},
  DOI        = {10.48550/arXiv.1405.1902},
  EPRINT     = {1405.1902},
  EPRINTTYPE = {arXiv},
  YEAR       = {2014},
  DATE       = {2014},
  ABSTRACT   = {In the SIGGRAPH 2014 paper [SvTSH14] an approach for animating deformable objects using sparse spacetime constraints is introduced. This report contains the proofs of two theorems presented in the paper.},
}
Endnote
%0 Report %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Proofs of two Theorems concerning Sparse Spacetime Constraints : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7066-0 %U https://arxiv.org/abs/1405.1902v1 %R 10.48550/arXiv.1405.1902 %D 2014 %X In the SIGGRAPH 2014 paper [SvTSH14] an approach for animating deformable<br>objects using sparse spacetime constraints is introduced. This report contains<br>the proofs of two theorems presented in the paper.<br> %K Computer Science, Graphics, cs.GR,Mathematics, Numerical Analysis, math.NA
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2014. Extended Branch Decomposition Graphs: Structural Comparison of Scalar Data. Computer Graphics Forum (Proc. EuroVis 2014)33, 3.
Abstract
We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other - in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise.
Export
BibTeX
@article{saikia14a,
  TITLE     = {Extended Branch Decomposition Graphs: {Structural} Comparison of Scalar Data},
  AUTHOR    = {Saikia, Himangshu and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  DOI       = {10.1111/cgf.12360},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2014},
  DATE      = {2014},
  ABSTRACT  = {We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other -- in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise.},
  JOURNAL   = {Computer Graphics Forum (Proc. EuroVis)},
  VOLUME    = {33},
  NUMBER    = {3},
  PAGES     = {41--50},
  BOOKTITLE = {Eurographics Conference on Visualization 2014 (EuroVis 2014)},
  EDITOR    = {Carr, Hamish and Rheingans, Penny and Schumann, Heidrun},
}
Endnote
%0 Journal Article %A Saikia, Himangshu %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Extended Branch Decomposition Graphs: Structural Comparison of Scalar Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4FFB-A %R 10.1111/cgf.12360 %7 2014 %D 2014 %X We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other - in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise. %J Computer Graphics Forum %V 33 %N 3 %& 41 %P 41 - 50 %I Wiley-Blackwell %C Oxford %B Eurographics Conference on Visualization 2014 %O EuroVis 2014 Swansea, Wales, UK, June 9 &#8211; 13, 2014
Rhodin, H., Tompkin, J., Kim, K.I., Varanasi, K., Seidel, H.-P., and Theobalt, C. 2014. Interactive Motion Mapping for Real-time Character Control. Computer Graphics Forum (Proc. EUROGRAPHICS 2014)33, 2.
Export
BibTeX
@article{RhodinCGF2014,
  TITLE     = {Interactive Motion Mapping for Real-time Character Control},
  AUTHOR    = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and Varanasi, Kiran and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12325},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2014},
  DATE      = {2014-05},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {33},
  NUMBER    = {2},
  PAGES     = {273--282},
  BOOKTITLE = {EUROGRAPHICS 2014},
  EDITOR    = {L{\'e}vy, Bruno and Kautz, Jan},
}
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A Varanasi, Kiran %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Motion Mapping for Real-time Character Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-8096-6 %F ISI: 000337543000028 %R 10.1111/cgf.12325 %7 2014 %D 2014 %J Computer Graphics Forum %V 33 %N 2 %& 273 %P 273 - 282 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Palmas, G., Bachynskyi, M., Oulasvirta, A., Seidel, H.-P., and Weinkauf, T. 2014a. MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS 2014)20, 12.
Abstract
In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.
Export
BibTeX
@article{palmas14b,
  TITLE     = {{MovExp}: A Versatile Visualization Tool for Human-Computer Interaction Studies with {3D} Performance and Biomechanical Data},
  AUTHOR    = {Palmas, Gregorio and Bachynskyi, Myroslav and Oulasvirta, Antti and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2014.2346311},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {Los Alamitos, CA},
  YEAR      = {2014},
  DATE      = {2014-12},
  ABSTRACT  = {In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS)},
  VOLUME    = {20},
  NUMBER    = {12},
  PAGES     = {2359--2368},
  BOOKTITLE = {IEEE Visual Analytics Science \& Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014},
  DEBUG     = {author: Ebert, David; author: Hauser, Helwig; author: Heer, Jeffrey; author: North, Chris; author: Tory, Melanie; author: Qu, Huamin; author: Shen, Han-Wei; author: Ynnerman, Anders},
  EDITOR    = {Chen, Min},
}
Endnote
%0 Journal Article %A Palmas, Gregorio %A Bachynskyi, Myroslav %A Oulasvirta, Antti %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D4C-4 %R 10.1109/TVCG.2014.2346311 %7 2014 %D 2014 %X In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. 
Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts. %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 12 %& 2359 %P 2359 - 2368 %I IEEE Computer Society %C Los Alamitos, CA %@ false %B IEEE Visual Analytics Science & Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014 %O Proceedings 2014 ; Paris, France, 9&#8211;14 November 2014 IEEE VIS 2014
Palmas, G., Bachynskyi, M., Oulasvirta, A., Seidel, H.-P., and Weinkauf, T. 2014b. An Edge-bundling Layout for Interactive Parallel Coordinates. PacificVis 2014, IEEE Pacific Visualization Symposium, IEEE Computer Society.
Abstract
Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing.
Export
BibTeX
@inproceedings{palmas14a,
  TITLE     = {An Edge-bundling Layout for Interactive Parallel Coordinates},
  AUTHOR    = {Palmas, Gregorio and Bachynskyi, Myroslav and Oulasvirta, Antti and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  DOI       = {10.1109/PacificVis.2014.40},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2014},
  DATE      = {2014-03},
  ABSTRACT  = {Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing.},
  BOOKTITLE = {PacificVis 2014, IEEE Pacific Visualization Symposium},
  PAGES     = {57--64},
  ADDRESS   = {Yokohama, Japan},
}
Endnote
%0 Conference Proceedings %A Palmas, Gregorio %A Bachynskyi, Myroslav %A Oulasvirta, Antti %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Edge-bundling Layout for Interactive Parallel Coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D29-0 %R 10.1109/PacificVis.2014.40 %D 2014 %B IEEE Pacific Visualization Symposium %Z date of event: 2014-03-04 - 2014-03-07 %C Yokohama, Japan %X Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing. %B PacificVis 2014 %P 57 - 64 %I IEEE Computer Society
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2014a. Deep Screen Space for Indirect Lighting of Volumes. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{DBLP:conf/vmv/NalbachRS14,
  TITLE     = {Deep Screen Space for Indirect Lighting of Volumes},
  AUTHOR    = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-74-3},
  DOI       = {10.2312/vmv.20141287},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization},
  EDITOR    = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp},
  PAGES     = {143--150},
  ADDRESS   = {Darmstadt, Germany},
}
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Deep Screen Space for Indirect Lighting of Volumes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D6C-B %R 10.2312/vmv.20141287 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 143 - 150 %I Eurographics Association %@ 978-3-905674-74-3 %U http://dx.doi.org/10.2312/vmv.20141287
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2014b. Deep Screen Space. Proceedings I3D 2014, ACM.
Export
BibTeX
@inproceedings{Nalbach:2014:DSS:2556700.2556708,
  TITLE     = {Deep Screen Space},
  AUTHOR    = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-2717-6},
  URL       = {http://doi.acm.org/10.1145/2556700.2556708},
  DOI       = {10.1145/2556700.2556708},
  PUBLISHER = {ACM},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {Proceedings I3D 2014},
  EDITOR    = {Keyser, John and Sander, Pedro},
  PAGES     = {79--86},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Deep Screen Space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D74-8 %R 10.1145/2556700.2556708 %U http://doi.acm.org/10.1145/2556700.2556708 %D 2014 %B 18th Meeting of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2014-03-14 - 2014-03-16 %C San Francisco, CA, USA %B Proceedings I3D 2014 %E Keyser, John; Sander, Pedro %P 79 - 86 %I ACM %@ 978-1-4503-2717-6
Lochmann, G., Reinert, B., Ritschel, T., Müller, S., and Seidel, H.-P. 2014. Real-time Reflective and Refractive Novel-view Synthesis. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{LochmannVMV2014,
  TITLE     = {Real-time Reflective and Refractive Novel-view Synthesis},
  AUTHOR    = {Lochmann, Gerrit and Reinert, Bernhard and Ritschel, Tobias and M{\"u}ller, Stefan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.2312/vmv.20141270},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization},
  EDITOR    = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp},
  PAGES     = {9--16},
  ADDRESS   = {Darmstadt, Germany},
}
Endnote
%0 Conference Proceedings %A Lochmann, Gerrit %A Reinert, Bernhard %A Ritschel, Tobias %A M&#252;ller, Stefan %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real&#8208;time Reflective and Refractive Novel&#8208;view Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-533E-6 %R 10.2312/vmv.20141270 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 9 - 16 %I Eurographics Association %U http://dx.doi.org/10.2312/vmv.20141270
Kurz, C., Wu, X., Wand, M., Thormählen, T., Kohli, P., and Seidel, H.-P. 2014. Symmetry-aware Template Deformation and Fitting. Computer Graphics Forum33, 6.
Export
BibTeX
@article{Kurz2014,
  TITLE     = {Symmetry-aware Template Deformation and Fitting},
  AUTHOR    = {Kurz, Christian and Wu, Xiaokun and Wand, Michael and Thorm{\"a}hlen, Thorsten and Kohli, Pushmeet and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12344},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {33},
  NUMBER    = {6},
  PAGES     = {205--219},
}
Endnote
%0 Journal Article %A Kurz, Christian %A Wu, Xiaokun %A Wand, Michael %A Thorm&#228;hlen, Thorsten %A Kohli, P. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Symmetry-aware Template Deformation and Fitting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D2B-D %R 10.1111/cgf.12344 %7 2014-03-20 %D 2014 %J Computer Graphics Forum %V 33 %N 6 %& 205 %P 205 - 219 %I Wiley-Blackwell %C Oxford
Kozlov, Y., Esturo, J.M., Seidel, H.-P., and Weinkauf, T. 2014. Regularized Harmonic Surface Deformation. http://arxiv.org/abs/1408.3326.
(arXiv: 1408.3326)
Abstract
Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts.
Export
BibTeX
@online{kozlov14,
  TITLE       = {Regularized Harmonic Surface Deformation},
  AUTHOR      = {Kozlov, Yeara and Esturo, Janick Martinez and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE    = {eng},
  URL         = {http://arxiv.org/abs/1408.3326},
  EPRINT      = {1408.3326},
  EPRINTTYPE  = {arXiv},
  EPRINTCLASS = {cs.GR},
  YEAR        = {2014},
  ABSTRACT    = {Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts.},
}
Endnote
%0 Report %A Kozlov, Yeara %A Esturo, Janick Martinez %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Regularized Harmonic Surface Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-49F5-A %U http://arxiv.org/abs/1408.3326 %D 2014 %X Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy.In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts. %K Computer Science, Graphics, cs.GR
Klehm, O., Seidel, H.-P., and Eisemann, E. 2014a. Filter-based Real-time Single Scattering using Rectified Shadow Maps. Journal of Computer Graphics Techniques 3, 3.
Export
BibTeX
@article{fbss_jcgtKlehm2014,
  TITLE     = {Filter-based Real-time Single Scattering using Rectified Shadow Maps},
  AUTHOR    = {Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISSN      = {2331-7418},
  URL       = {http://jcgt.org/published/0003/03/02/},
  PUBLISHER = {Williams College},
  ADDRESS   = {Williamstown, MA},
  YEAR      = {2014},
  DATE      = {2014-08},
  JOURNAL   = {Journal of Computer Graphics Techniques},
  VOLUME    = {3},
  NUMBER    = {3},
  PAGES     = {7--34},
}
Endnote
%0 Journal Article %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Filter-based Real-time Single Scattering using Rectified Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51B3-E %U http://jcgt.org/published/0003/03/02/ %7 2014 %D 2014 %J Journal of Computer Graphics Techniques %O JCGT %V 3 %N 3 %& 7 %P 7 - 34 %I Williams College %C Williamstown, MA %@ false %U http://jcgt.org/published/0003/03/02/
Klehm, O., Seidel, H.-P., and Eisemann, E. 2014b. Prefiltered Single Scattering. Proceedings I3D 2014, ACM.
Export
BibTeX
@inproceedings{Klehm:2014:PSS:2556700.2556704,
  TITLE     = {Prefiltered Single Scattering},
  AUTHOR    = {Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-2717-6},
  DOI       = {10.1145/2556700.2556704},
  PUBLISHER = {ACM},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {Proceedings I3D 2014},
  EDITOR    = {Keyser, John and Sander, Pedro},
  PAGES     = {71--78},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Prefiltered Single Scattering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51C5-6 %R 10.1145/2556700.2556704 %D 2014 %B 18th Meeting of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2014-03-14 - 2014-03-16 %C San Francisco, CA, USA %K participating media, scattering, shadow test %B Proceedings I3D 2014 %E Keyser, John; Sander, Pedro %P 71 - 78 %I ACM %@ 978-1-4503-2717-6
Klehm, O., Ihrke, I., Seidel, H.-P., and Eisemann, E. 2014c. Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor. IEEE Transactions on Visualization and Computer Graphics 20, 7.
Export
BibTeX
@article{PLM-tvcg_Klehm2014,
  TITLE     = {Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor},
  AUTHOR    = {Klehm, Oliver and Ihrke, Ivo and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2014.13},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {Los Alamitos, CA},
  YEAR      = {2014},
  DATE      = {2014-07},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME    = {20},
  NUMBER    = {7},
  PAGES     = {983--995},
}
Endnote
%0 Journal Article %A Klehm, Oliver %A Ihrke, Ivo %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51CA-B %R 10.1109/TVCG.2014.13 %7 2014 %D 2014 %K rendering (computer graphics);artistic control;environmental lighting;image component;lighting manipulations;noise function parameters;painting metaphor;property manipulations;realistic rendering;static volume stylization;static volumes;tomographic reconstruction;volume appearance;volume properties;volumetric rendering equation;Equations;Image reconstruction;Lighting;Mathematical model;Optimization;Rendering (computer graphics);Scattering;Artist control;optimization;participating media %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 7 %& 983 %P 983 - 995 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2014a. Improving Perception of Binocular Stereo Motion on 3D Display Devices. Stereoscopic Displays and Applications XXV, SPIE.
Abstract
This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe, how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation how to improve rendering of synthetic stereo animations.
Export
BibTeX
@inproceedings{Kellnhofer2014a,
  TITLE     = {Improving Perception of Binocular Stereo Motion on {3D} Display Devices},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0277-786X},
  ISBN      = {9780819499288},
  DOI       = {10.1117/12.2032389},
  PUBLISHER = {SPIE},
  YEAR      = {2014},
  DATE      = {2014},
  ABSTRACT  = {This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe, how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation how to improve rendering of synthetic stereo animations.},
  BOOKTITLE = {Stereoscopic Displays and Applications XXV},
  EDITOR    = {Woods, Andrew J. and Holliman, Nicolas S. and Favalora, Gregg E.},
  PAGES     = {1--11},
  EID       = {901116},
  SERIES    = {Proceedings of SPIE-IS\&T Electronic Imaging},
  VOLUME    = {9011},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Improving Perception of Binocular Stereo Motion on 3D Display Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-318D-7 %R 10.1117/12.2032389 %D 2014 %B Stereoscopic Displays and Applications XXV %Z date of event: 2014-02-03 - 2014-02-05 %C San Francisco, CA, USA %X This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe, how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation how to improve rendering of synthetic stereo animations. %B Stereoscopic Displays and Applications XXV %E Woods, Andrew J.; Holliman, Nicolas S.; Favalora, Gregg E. %P 1 - 11 %Z sequence number: 901116 %I SPIE %@ 9780819499288 %B Proceedings of SPIE-IS&T Electronic Imaging %N 9011 %@ false
Kellnhofer, P., Ritschel, T., Vangorp, P., Myszkowski, K., and Seidel, H.-P. 2014b. Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision. ACM Transactions on Applied Perception 11, 3.
Export
BibTeX
@article{kellnhofer:2014c:DarkStereo,
  TITLE     = {Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Vangorp, Peter and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1544-3558},
  DOI       = {10.1145/2644813},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {ACM Transactions on Applied Perception},
  VOLUME    = {11},
  NUMBER    = {3},
  EID       = {15},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Vangorp, Peter %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE0E-E %R 10.1145/2644813 %7 2014 %D 2014 %J ACM Transactions on Applied Perception %V 11 %N 3 %Z sequence number: 15 %I ACM %C New York, NY %@ false
Günther, D., Jacobson, A., Reininghaus, J., Seidel, H.-P., Sorkine-Hornung, O., and Weinkauf, T. 2014. Fast and Memory-efficient Topological Denoising of 2D and 3D Scalar Fields. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS 2014) 20, 12.
Export
BibTeX
@article{guenther14c,
  TITLE     = {Fast and Memory-efficient Topological Denoising of {2D} and {3D} Scalar Fields},
  AUTHOR    = {G{\"u}nther, David and Jacobson, Alec and Reininghaus, Jan and Seidel, Hans-Peter and Sorkine-Hornung, Olga and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2014.2346432},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {Los Alamitos, CA},
  YEAR      = {2014},
  DATE      = {2014-12},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS)},
  VOLUME    = {20},
  NUMBER    = {12},
  PAGES     = {2585--2594},
  BOOKTITLE = {IEEE Visual Analytics Science \& Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014},
  EDITOR    = {Chen, Min and Ebert, David and Hauser, Helwig and Heer, Jeffrey and North, Chris and Tory, Melanie and Qu, Huamin and Shen, Han-Wei and Ynnerman, Anders},
}
Endnote
%0 Journal Article %A G&#252;nther, David %A Jacobson, Alec %A Reininghaus, Jan %A Seidel, Hans-Peter %A Sorkine-Hornung, Olga %A Weinkauf, Tino %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Memory-efficient Topological Denoising of 2D and 3D Scalar Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5349-E %R 10.1109/TVCG.2014.2346432 %7 2014 %D 2014 %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 12 %& 2585 %P 2585 - 2594 %I IEEE Computer Society %C Los Alamitos, CA %@ false %B IEEE Visual Analytics Science & Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014 %O Proceedings 2014 ; Paris, France, 9&#8211;14 November 2014 IEEE VIS 2014
Guenther, D., Reininghaus, J., Seidel, H.-P., and Weinkauf, T. 2014. Notes on the Simplification of the Morse-Smale Complex. Topological Methods in Data Analysis and Visualization III (TopoInVis 2013), Springer.
Abstract
The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations.
Export
BibTeX
@inproceedings{guenther13a,
  TITLE     = {Notes on the Simplification of the {Morse}-{Smale} Complex},
  AUTHOR    = {G{\"u}nther, David and Reininghaus, Jan and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISBN      = {978-3-319-04098-1},
  DOI       = {10.1007/978-3-319-04099-8_9},
  PUBLISHER = {Springer},
  YEAR      = {2014},
  DATE      = {2014},
  ABSTRACT  = {The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations.},
  BOOKTITLE = {Topological Methods in Data Analysis and Visualization III (TopoInVis 2013)},
  EDITOR    = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald},
  PAGES     = {135--150},
  SERIES    = {Mathematics and Visualization},
  ADDRESS   = {Davis, CA, USA},
}
Endnote
%0 Conference Proceedings %A Guenther, David %A Reininghaus, Jan %A Seidel, Hans-Peter %A Weinkauf, Tino %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Notes on the Simplification of the Morse-Smale Complex : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-52F3-3 %R 10.1007/978-3-319-04099-8_9 %D 2014 %B TopoInVis %Z date of event: 2013-03-04 - 2013-03-06 %C Davis, CA, USA %X The Morse-Smale complex can be either explicitly or implicitly represented.<br>Depending on the type of representation, the simplification of the<br>Morse-Smale complex works differently. In the explicit representation,<br>the Morse-Smale complex is directly simplified by explicitly reconnecting<br>the critical points during the simplification. In the implicit representation,<br>on the other hand, the Morse-Smale complex is given by a combinatorial<br>gradient field. In this setting, the simplification changes the combinatorial<br>flow, which yields an indirect simplification of the Morse-Smale<br>complex. The topological complexity of the Morse-Smale complex is<br>reduced in both representations. However, the simplifications generally<br>yield different results. In this paper, we emphasize the differences<br>between these two representations, and provide a high-level discussion<br>about their advantages and limitations. %B Topological Methods in Data Analysis and Visualization III %E Bremer, Peer-Timo; Hotz, Ingrid; Pascucci, Valerio; Peikert, Ronald %P 135 - 150 %I Springer %@ 978-3-319-04098-1 %B Mathematics and Visualization %U https://rdcu.be/dK3QD
Gryaditskaya, Y., Pouli, T., Reinhard, E., and Seidel, H.-P. 2014. Sky Based Light Metering for High Dynamic Range Images. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Abstract
Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel—effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design.
Export
BibTeX
@article{CGF:Gryad:14,
  TITLE     = {Sky Based Light Metering for High Dynamic Range Images},
  AUTHOR    = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1467-8659},
  DOI       = {10.1111/cgf.12474},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2014},
  DATE      = {2014},
  ABSTRACT  = {Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel---effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design.},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {33},
  NUMBER    = {7},
  PAGES     = {61--69},
  BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)},
}
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Sky Based Light Metering for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-6C64-1 %R 10.1111/cgf.12474 %7 2014 %D 2014 %X Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel&#8212;effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design. %J Computer Graphics Forum %V 33 %N 7 %& 61 %P 61 - 69 %I Wiley-Blackwell %C Oxford, UK %@ false %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Elek, O., Ritschel, T., Dachsbacher, C., and Seidel, H.-P. 2014a. Interactive Light Scattering with Principal-ordinate Propagation. Graphics Interface 2014, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{ElekGI2014,
  TITLE     = {Interactive Light Scattering with Principal-ordinate Propagation},
  AUTHOR    = {Elek, Oskar and Ritschel, Tobias and Dachsbacher, Carsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4822-6003-8},
  URL       = {https://graphicsinterface.org/proceedings/gi2014/gi2014-11/},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {Graphics Interface 2014},
  EDITOR    = {Kry, Paul G. and Bunt, Andrea},
  PAGES     = {87--94},
  ADDRESS   = {Montreal, Canada},
}
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Ritschel, Tobias %A Dachsbacher, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Light Scattering with Principal-ordinate Propagation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5181-D %U https://graphicsinterface.org/proceedings/gi2014/gi2014-11/ %D 2014 %B Graphics Interface %Z date of event: 2014-05-07 - 2014-05-09 %C Montreal, Canada %B Graphics Interface 2014 %E Kry, Paul G.; Bunt, Andrea %P 87 - 94 %I Canadian Information Processing Society %@ 978-1-4822-6003-8
Elek, O., Bauszat, P., Ritschel, T., Magnor, M., and Seidel, H.-P. 2014b. Progressive Spectral Ray Differentials. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{ElekVMV2014,
  TITLE     = {Progressive Spectral Ray Differentials},
  AUTHOR    = {Elek, Oskar and Bauszat, Pablo and Ritschel, Tobias and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-74-3},
  DOI       = {10.2312/vmv.20141288},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2014},
  DATE      = {2014},
  BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization},
  EDITOR    = {Bender, Jan and Kuijper, Arjan and Landesberger, Tatiana and Theisel, Holger and Urban, Philipp},
  PAGES     = {151--158},
  ADDRESS   = {Darmstadt, Germany},
}
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Bauszat, Pablo %A Ritschel, Tobias %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Progressive Spectral Ray Differentials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5176-5 %R 10.2312/vmv.20141288 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %P 151 - 158 %I Eurographics Association %@ 978-3-905674-74-3
Elek, O., Ritschel, T., Dachsbacher, C., and Seidel, H.-P. 2014c. Principal-ordinates Propagation for Real-time Rendering of Participating Media. Computers & Graphics 45.
Export
BibTeX
@article{ElekCAG2014,
  TITLE     = {Principal-ordinates Propagation for Real-time Rendering of Participating Media},
  AUTHOR    = {Elek, Oskar and Ritschel, Tobias and Dachsbacher, Carsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2014.08.003},
  PUBLISHER = {Elsevier},
  ADDRESS   = {Amsterdam},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {Computers \& Graphics},
  VOLUME    = {45},
  PAGES     = {28--39},
}
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Dachsbacher, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Principal-ordinates Propagation for Real-time Rendering of Participating Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-516D-C %R 10.1016/j.cag.2014.08.003 %7 2014-09-06 %D 2014 %J Computers & Graphics %V 45 %& 28 %P 28 - 39 %I Elsevier %C Amsterdam %@ false
Elek, O., Bauszat, P., Ritschel, T., Magnor, M., and Seidel, H.-P. 2014d. Spectral Ray Differentials. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2014) 33, 4.
Export
BibTeX
@article{Elek2014EGSR,
  TITLE     = {Spectral Ray Differentials},
  AUTHOR    = {Elek, Oskar and Bauszat, Pablo and Ritschel, Tobias and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12418},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2014},
  DATE      = {2014},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME    = {33},
  NUMBER    = {4},
  PAGES     = {113--122},
  BOOKTITLE = {Eurographics Symposium on Rendering 2014},
  EDITOR    = {Jarosz, Wojciech and Peers, Pieter},
}
Endnote
%0 Journal Article %A Elek, Oskar %A Bauszat, Pablo %A Ritschel, Tobias %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Spectral Ray Differentials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4A77-B %R 10.1111/cgf.12418 %7 2014 %D 2014 %J Computer Graphics Forum %V 33 %N 4 %& 113 %P 113 - 122 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2014 %O Eurographics Symposium on Rendering 2014 EGSR 2014 Lyon, France, June 25th - 27th, 2014
Dabala, L., Kellnhofer, P., Ritschel, T., et al. 2014. Manipulating Refractive and Reflective Binocular Disparity. Computer Graphics Forum (Proc. EUROGRAPHICS 2014) 33, 2.
Abstract
Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass, that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates, that our approach combines comfortable viewing with realistic depiction of typical specular scenes.
Export
BibTeX
@article{Kellnhofer2014b,
  TITLE     = {Manipulating Refractive and Reflective Binocular Disparity},
  AUTHOR    = {Dabala, Lukasz and Kellnhofer, Petr and Ritschel, Tobias and Didyk, Piotr and Templin, Krzysztof and Rokita, Przemyslaw and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1111/cgf.12290},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2014},
  DATE      = {2014},
  ABSTRACT  = {Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass, that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates, that our approach combines comfortable viewing with realistic depiction of typical specular scenes.},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {33},
  NUMBER    = {2},
  PAGES     = {53--62},
  BOOKTITLE = {EUROGRAPHICS 2014},
  EDITOR    = {L{\'e}vy, Bruno and Kautz, Jan},
}
Endnote
%0 Journal Article %A Dabala, Lukasz %A Kellnhofer, Petr %A Ritschel, Tobias %A Didyk, Piotr %A Templin, Krzysztof %A Rokita, Przemyslaw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Manipulating Refractive and Reflective Binocular Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-EEF9-6 %R 10.1111/cgf.12290 %7 2014-06-01 %D 2014 %X Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass, that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. 
A preliminary perceptual study indicates, that our approach combines comfortable viewing with realistic depiction of typical specular scenes. %J Computer Graphics Forum %V 33 %N 2 %& 53 %P 53 - 62 %I Wiley-Blackwell %C Oxford, UK %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Brunton, A., Wand, M., Wuhrer, S., Seidel, H.-P., and Weinkauf, T. 2014. A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation. Graphical Models76, 2.
Abstract
Intrinsic shape matching has become the standard approach for pose invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to the state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms.
Export
BibTeX
@article{brunton13,
  TITLE     = {A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation},
  AUTHOR    = {Brunton, Alan and Wand, Michael and Wuhrer, Stefanie and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISSN      = {1524-0703},
  DOI       = {10.1016/j.gmod.2013.11.003},
  PUBLISHER = {Academic Press},
  ADDRESS   = {San Diego, CA},
  YEAR      = {2014},
  DATE      = {2014},
  ABSTRACT  = {Intrinsic shape matching has become the standard approach for pose invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to the state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms.},
  JOURNAL   = {Graphical Models},
  VOLUME    = {76},
  NUMBER    = {2},
  PAGES     = {70--85},
}
Endnote
%0 Journal Article %A Brunton, Alan %A Wand, Michael %A Wuhrer, Stefanie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F6E9-5 %R 10.1016/j.gmod.2013.11.003 %7 2013-12-15 %D 2014 %X Intrinsic shape matching has become the standard approach for pose invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to the state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms. %J Graphical Models %V 76 %N 2 %& 70 %P 70 - 85 %I Academic Press %C San Diego, CA %@ false
2013
Wang, Z., Grochulla, M.P., Thormählen, T., and Seidel, H.-P. 2013. 3D Face Template Registration Using Normal Maps. 3DV 2013, International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Wang2013,
  TITLE     = {{3D} Face Template Registration Using Normal Maps},
  AUTHOR    = {Wang, Zhongjie and Grochulla, Martin Peter and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-7695-5067-1},
  DOI       = {10.1109/3DV.2013.46},
  LOCALID   = {Local-ID: 220FFD3372EB9C04C1257C6000528BF3-Wang2013},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2013},
  DATE      = {2013},
  BOOKTITLE = {3DV 2013, International Conference on 3D Vision},
  EDITOR    = {Guerrero, Juan E.},
  PAGES     = {295--302},
  ADDRESS   = {Seattle, WA, USA},
}
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Grochulla, Martin Peter %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Face Template Registration Using Normal Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1CEC-B %R 10.1109/3DV.2013.46 %F OTHER: Local-ID: 220FFD3372EB9C04C1257C6000528BF3-Wang2013 %D 2013 %B International Conference on 3D Vision %Z date of event: 2013-06-29 - 2013-07-01 %C Seattle, WA, USA %B 3DV 2013 %E Guerrero, Juan E. %P 295 - 302 %I IEEE Computer Society %@ 978-0-7695-5067-1
Von Tycowicz, C., Schulz, C., Seidel, H.-P., and Hildebrandt, K. 2013. An Efficient Construction of Reduced Deformable Objects. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2013)32, 6.
Abstract
Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the approximation of reduced forces and for the construction of reduced shape spaces of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches.
Export
BibTeX
@article{Hildebrandt2013,
  TITLE     = {An Efficient Construction of Reduced Deformable Objects},
  AUTHOR    = {von Tycowicz, Christoph and Schulz, Christian and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2508363.2508392},
  LOCALID   = {Local-ID: CBFBAC90E4E008EDC1257C240031E997-Hildebrandt2013},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the \emph{approximation of reduced forces} and for the \emph{construction of reduced shape spaces} of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches.},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {32},
  NUMBER    = {6},
  PAGES     = {1--10},
  EID       = {213},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2013},
}
Endnote
%0 Journal Article %A von Tycowicz, Christoph %A Schulz, Christian %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Construction of Reduced Deformable Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3A34-A %R 10.1145/2508363.2508392 %F OTHER: Local-ID: CBFBAC90E4E008EDC1257C240031E997-Hildebrandt2013 %7 2013 %D 2013 %X Many efficient computational methods for physical simulation are based on model <br>reduction. We propose new model reduction techniques for the <br>\emphapproximation of reduced forces} and <br>for the \emph{construction of reduced shape spaces of deformable objects that <br>accelerate<br>the construction of a reduced dynamical system, increase the accuracy<br>of the approximation, and simplify the implementation of model<br>reduction. Based on the techniques, we introduce schemes for real-time<br>simulation of deformable objects and interactive deformation-based editing <br>of triangle or tet meshes. We demonstrate the effectiveness of the new <br>techniques <br>in different experiments with elastic solids and shells and compare them to <br>alternative approaches. %J ACM Transactions on Graphics %V 32 %N 6 %& 1 %P 1 - 10 %Z sequence number: 213 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2013 %O ACM SIGGRAPH Asia 2013 Hong Kong, 19 - 22 November 2013
Sunkel, M., Jansen, S., Wand, M., and Seidel, H.-P. 2013. A Correlated Parts Model for Object Detection in Large 3D Scans. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Abstract
This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm.
Export
BibTeX
@article{Sunkel2013,
  TITLE     = {A Correlated Parts Model for Object Detection in Large {3D} Scans},
  AUTHOR    = {Sunkel, Martin and Jansen, Silke and Wand, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12040},
  LOCALID   = {Local-ID: 71E3D133D260E612C1257B0400475765-Sunkel2013},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm.},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {32},
  NUMBER    = {2},
  PAGES     = {205--214},
  BOOKTITLE = {EUROGRAPHICS 2013},
  EDITOR    = {Poulin, P. and Navazo, I.},
}
Endnote
%0 Journal Article %A Sunkel, Martin %A Jansen, Silke %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Correlated Parts Model for Object Detection in Large 3D Scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1CE6-8 %R 10.1111/cgf.12040 %F OTHER: Local-ID: 71E3D133D260E612C1257B0400475765-Sunkel2013 %7 2013-05-06 %D 2013 %X This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm. %J Computer Graphics Forum %V 32 %N 2 %& 205 %P 205 - 214 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EG 2013 EUROGRAPHICS 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th &#8211; 10th, 2013
Scherbaum, K., Petterson, J., Feris, R.S., Blanz, V., and Seidel, H.-P. 2013. Fast Face Detector Training Using Tailored Views. ICCV 2013, IEEE International Conference on Computer Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Scherbaum2013,
  TITLE     = {Fast Face Detector Training Using Tailored Views},
  AUTHOR    = {Scherbaum, Kristina and Petterson, James and Feris, Rogerio S. and Blanz, Volker and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1550-5499},
  ISBN      = {978-1-4799-2839-2},
  DOI       = {10.1109/ICCV.2013.354},
  LOCALID   = {Local-ID: BBE1AD1B44792B41C1257C600050C266-Scherbaum2013},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2013},
  DATE      = {2013},
  BOOKTITLE = {ICCV 2013, IEEE International Conference on Computer Vision},
  PAGES     = {2848--2855},
  ADDRESS   = {Sydney, Australia},
}
Endnote
%0 Conference Proceedings %A Scherbaum, Kristina %A Petterson, James %A Feris, Rogerio S. %A Blanz, Volker %A Seidel, Hans-Peter %+ Cluster of Excellence Multimodal Computing and Interaction External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Face Detector Training Using Tailored Views : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-7AC0-9 %R 10.1109/ICCV.2013.354 %F OTHER: Local-ID: BBE1AD1B44792B41C1257C600050C266-Scherbaum2013 %D 2013 %B IEEE International Conference on Computer Vision %Z date of event: 2013-12-01 - 2013-12-08 %C Sydney, Australia %B ICCV 2013 %P 2848 - 2855 %I IEEE Computer Society %@ false
Reshetouski, I., Manakov, A., Bhandari, A., Raskar, R., Seidel, H.-P., and Ihrke, I. 2013. Discovering the Structure of a Planar Mirror System from Multiple Observations of a Single Point. 2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2013), IEEE Computer Society.
Export
BibTeX
@inproceedings{DBLP:conf/cvpr/ReshetouskiMBRSI13,
  TITLE     = {Discovering the Structure of a Planar Mirror System from Multiple Observations of a Single Point},
  AUTHOR    = {Reshetouski, Ilya and Manakov, Alkhazur and Bhandari, Ayush and Raskar, Ramesh and Seidel, Hans-Peter and Ihrke, Ivo},
  LANGUAGE  = {eng},
  ISBN      = {978-1-5386-5672-3},
  DOI       = {10.1109/CVPR.2013.19},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2013},
  DATE      = {2013},
  BOOKTITLE = {2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2013)},
  PAGES     = {89--96},
  ADDRESS   = {Portland, OR, USA},
}
Endnote
%0 Conference Proceedings %A Reshetouski, Ilya %A Manakov, Alkhazur %A Bhandari, Ayush %A Raskar, Ramesh %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Discovering the Structure of a Planar Mirror System from Multiple Observations of a Single Point : %G eng %U http://hdl.handle.net/21.11116/0000-000F-6BFF-B %R 10.1109/CVPR.2013.19 %D 2013 %B 2013 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2013-06-23 - 2013-06-28 %C Portland, OR, USA %B 2013 IEEE Conference on Computer Vision and Pattern Recognition %P 89 - 96 %I IEEE Computer Society %@ 978-1-5386-5672-3
Reinhard, E., Efros, A., Kautz, J., and Seidel, H.-P. 2013. On Visual Realism of Synthesized Imagery. Proceedings of the IEEE101, 9.
Export
BibTeX
@article{Reinhard2013a,
  TITLE     = {On Visual Realism of Synthesized Imagery},
  AUTHOR    = {Reinhard, Erik and Efros, Alexei and Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0018-9219},
  DOI       = {10.1109/JPROC.2013.2260711},
  LOCALID   = {Local-ID: 87D8785C8741C366C1257B820045FF96-Reinhard2013a},
  PUBLISHER = {IEEE},
  ADDRESS   = {Piscataway, NJ},
  YEAR      = {2013},
  DATE      = {2013},
  JOURNAL   = {Proceedings of the IEEE},
  VOLUME    = {101},
  NUMBER    = {9},
  PAGES     = {1998--2007},
}
Endnote
%0 Journal Article %A Reinhard, Erik %A Efros, Alexei %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T On Visual Realism of Synthesized Imagery : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3E31-1 %R 10.1109/JPROC.2013.2260711 %F OTHER: Local-ID: 87D8785C8741C366C1257B820045FF96-Reinhard2013a %7 2013-07-25 %D 2013 %J Proceedings of the IEEE %O Proc. IEEE %V 101 %N 9 %& 1998 %P 1998 - 2007 %I IEEE %C Piscataway, NJ %@ false
Reinert, B., Ritschel, T., and Seidel, H.-P. 2013. Interactive By-example Design of Artistic Packing Layouts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2013)32, 6.
Abstract
We propose an approach to "pack" a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extend, we propose a novel generalization of Centroidal Voronoi Tesselation which equalizes the distances between boundaries of nearby primitives. Compared to previous primitive distribution approaches our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system's usability.
Export
BibTeX
@article{Reinert2013,
  TITLE     = {Interactive By-example Design of Artistic Packing Layouts},
  AUTHOR    = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2508363.2508409},
  LOCALID   = {Local-ID: 7A381077C9181F50C1257C6F004CC475-Reinert2013},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {We propose an approach to ``pack'' a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extend, we propose a novel generalization of Centroidal Voronoi Tesselation which equalizes the distances between boundaries of nearby primitives. Compared to previous primitive distribution approaches our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system's usability.},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {32},
  NUMBER    = {6},
  PAGES     = {1--7},
  EID       = {218},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2013},
}
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive By-example Design of Artistic Packing Layouts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-18D8-6 %R 10.1145/2508363.2508409 %F OTHER: Local-ID: 7A381077C9181F50C1257C6F004CC475-Reinert2013 %D 2013 %X We propose an approach to &#65533;pack&#65533; a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extend, we produce a pleasant distribution of primitives with spatial extend, we propose a novel generalization of Centroidal Voronoi Tesselation which equalizes the distances between boundaries of nearby primitives. Compared to previous primitive distribution approaches our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system&#65533;s usability. %J ACM Transactions on Graphics %V 32 %N 6 %& 1 %P 1 - 7 %Z sequence number: 218 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2013 %O ACM SIGGRAPH Asia 2013 Hong Kong, 19 - 22 November 2013
Pouli, T., Artusi, A., Banterle, F., Akyüz, A.O., Seidel, H.-P., and Reinhard, E. 2013. Color Correction for Tone Reproduction. 21st Color and Imaging Conference Final Program and Proceedings (CIC 2013), IS&T.
Export
BibTeX
@inproceedings{PouliCIC21,
  TITLE     = {Color Correction for Tone Reproduction},
  AUTHOR    = {Pouli, Tania and Artusi, Alessandro and Banterle, Francesco and Aky{\"u}z, Ahmet O. and Seidel, Hans-Peter and Reinhard, Erik},
  LANGUAGE  = {eng},
  DOI       = {10.2352/CIC.2013.21.1.art00039},
  PUBLISHER = {IS\&T},
  YEAR      = {2013},
  DATE      = {2013},
  BOOKTITLE = {21st Color and Imaging Conference Final Program and Proceedings (CIC 2013)},
  PAGES     = {215--220},
  ADDRESS   = {Albuquerque, NM, USA},
}
Endnote
%0 Conference Proceedings %A Pouli, Tania %A Artusi, Alessandro %A Banterle, Francesco %A Aky&#252;z, Ahmet O. %A Seidel, Hans-Peter %A Reinhard, Erik %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Color Correction for Tone Reproduction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-A2C6-E %R 10.2352/CIC.2013.21.1.art00039 %D 2013 %B 21st Color and Imaging Conference %Z date of event: 2013-11-04 - 2013-11-08 %C Albuquerque, NM, USA %B 21st Color and Imaging Conference Final Program and Proceedings %P 215 - 220 %I IS&T
Nguyen, C., Scherzer, D., Ritschel, T., and Seidel, H.-P. 2013. Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Abstract
This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that ``explains'' the surface light field manipulation. Instead of classic \mathcal L_2 fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse \mathcal L_0 change of shading parameters instead. Consequently, our system does not require ``diffuse'' or ``glossiness'' brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU.
Export
BibTeX
@article{Nguyen2013,
  title     = {Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization},
  author    = {Nguyen, Chuong and Scherzer, Daniel and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12038},
  localid   = {Local-ID: 4CD3871C310E2855C1257B010065285A-Nguyen2013},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  abstract  = {This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that ``explains'' the surface light field manipulation. Instead of classic {$\mathcal{L}_2$} fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse {$\mathcal{L}_0$} change of shading parameters instead. Consequently, our system does not require ``diffuse'' or ``glossiness'' brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {32},
  number    = {2},
  pages     = {185--194},
  booktitle = {EUROGRAPHICS 2013},
  editor    = {Poulin, P. and Navazo, I.},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Scherzer, Daniel %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3810-8 %R 10.1111/cgf.12038 %F OTHER: Local-ID: 4CD3871C310E2855C1257B010065285A-Nguyen2013 %7 2013-05-06 %D 2013 %X This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that ``explains'' the surface light field manipulation. Instead of classic \mathcal L_2 fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse \mathcal L_0 change of shading parameters instead. Consequently, our system does not require ``diffuse'' or ``glossiness'' brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU. 
%J Computer Graphics Forum %V 32 %N 2 %& 185 %P 185 - 194 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EUROGRAPHICS 2013 EG 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th - 10th, 2013
Milliez, A., Wand, M., Cani, M.-P., and Seidel, H.-P. 2013. Mutable Elastic Models for Sculpting Structured Shapes. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Export
BibTeX
@article{Milliez2013,
  title     = {Mutable Elastic Models for Sculpting Structured Shapes},
  author    = {Milliez, Antoine and Wand, Michael and Cani, Marie-Paule and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12022},
  localid   = {Local-ID: 54D78E6C8E10AB4CC1257C130048CEEA-Milliez2013},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {32},
  number    = {2},
  pages     = {21--30},
  booktitle = {EUROGRAPHICS 2013},
  editor    = {Poulin, Pierre and Navazo, Isabel},
}
Endnote
%0 Journal Article %A Milliez, Antoine %A Wand, Michael %A Cani, Marie-Paule %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Mutable Elastic Models for Sculpting Structured Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3CCE-2 %R 10.1111/cgf.12022 %F OTHER: Local-ID: 54D78E6C8E10AB4CC1257C130048CEEA-Milliez2013 %7 2013-05-06 %D 2013 %J Computer Graphics Forum %V 32 %N 2 %& 21 %P 21 - 30 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EUROGRAPHICS 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th - 10th, 2013 EG 2013
Manakov, A., Restrepo, J.F., Klehm, O., et al. 2013. A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2013)32, 4.
Abstract
We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design.
Export
BibTeX
@article{Manakov2013,
  title     = {A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging},
  author    = {Manakov, Alkhazur and Restrepo, John F. and Klehm, Oliver and Heged{\"u}s, Ramon and Eisemann, Elmar and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2461912.2461937},
  localid   = {Local-ID: 2AF094BD6240B2D2C1257C13003B6CBD-Manakov2013},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2013},
  date      = {2013},
  abstract  = {We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {32},
  number    = {4},
  pages     = {1--14},
  eid       = {47},
  booktitle = {Proceedings of ACM SIGGRAPH 2013},
}
Endnote
%0 Journal Article %A Manakov, Alkhazur %A Restrepo, John F. %A Klehm, Oliver %A Heged&#252;s, Ramon %A Eisemann, Elmar %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3CDD-F %R 10.1145/2461912.2461937 %F OTHER: Local-ID: 2AF094BD6240B2D2C1257C13003B6CBD-Manakov2013 %7 2013 %D 2013 %X We propose a non-permanent add-on that enables plenoptic imaging with standard <br>cameras. Our design is based on a physical copying mechanism that multiplies a <br>sensor image into a number of identical copies that still carry the plenoptic <br>information of interest. Via different optical filters, we can then recover the <br>desired information. A minor modification of the design also allows for <br>aperture sub-sampling and, hence, light-field imaging. As the filters in our <br>design are exchangeable, a reconfiguration for different imaging purposes is <br>possible. We show in a prototype setup that high dynamic range, multispectral, <br>polarization, and light-field imaging can be achieved with our design. %J ACM Transactions on Graphics %V 32 %N 4 %& 1 %P 1 - 14 %Z sequence number: 47 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2013 %O ACM SIGGRAPH 2013 Anaheim, California, 21 - 25 July 2013
Liu, Y., Gall, J., Stoll, C., Dai, Q., Seidel, H.-P., and Theobalt, C. 2013. Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence35, 11.
Export
BibTeX
@article{LiuPami2013,
  title     = {Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation},
  author    = {Liu, Yebin and Gall, J{\"u}rgen and Stoll, Carsten and Dai, Qionghai and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {0162-8828},
  doi       = {10.1109/TPAMI.2013.47},
  localid   = {Local-ID: 3A056CE707FBCCD9C1257C6000533A6F-LiuPami2013},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2013},
  date      = {2013},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume    = {35},
  number    = {11},
  pages     = {2720--2735},
}
Endnote
%0 Journal Article %A Liu, Yebin %A Gall, J&#252;rgen %A Stoll, Carsten %A Dai, Qionghai %A Seidel, Hans-Peter %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3937-8 %R 10.1109/TPAMI.2013.47 %F OTHER: Local-ID: 3A056CE707FBCCD9C1257C6000533A6F-LiuPami2013 %7 2013-02-21 %D 2013 %J IEEE Transactions on Pattern Analysis and Machine Intelligence %O IEEE Trans. Pattern Anal. Mach. Intell. %V 35 %N 11 %& 2720 %P 2720 - 2735 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Lee, S., Sips, M., and Seidel, H.-P. 2013. Perceptually Driven Visibility Optimization for Categorical Data Visualization. IEEE Transactions on Visualization and Computer Graphics19, 10.
Export
BibTeX
@article{Seidel2013,
  title     = {Perceptually Driven Visibility Optimization for Categorical Data Visualization},
  author    = {Lee, Sungkil and Sips, Mike and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2012.315},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2013},
  date      = {2013},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {19},
  number    = {10},
  pages     = {1746--1757},
}
Endnote
%0 Journal Article %A Lee, Sungkil %A Sips, Mike %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Driven Visibility Optimization for Categorical Data Visualization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0018-A9FB-0 %R 10.1109/TVCG.2012.315 %7 2012-11-30 %D 2013 %J IEEE Transactions on Visualization and Computer Graphics %V 19 %N 10 %& 1746 %P 1746 - 1757 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Kurz, C., Ritschel, T., Eisemann, E., Thormählen, T., and Seidel, H.-P. 2013. Generating Realistic Camera Shake for Virtual Scenes. Journal of Virtual Reality and Broadcasting10, 7.
Abstract
When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible.
Export
BibTeX
@article{Kurz2013,
  title     = {Generating Realistic Camera Shake for Virtual Scenes},
  author    = {Kurz, Christian and Ritschel, Tobias and Eisemann, Elmar and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1860-2037},
  url       = {urn:nbn:de:0009-6-38335},
  localid   = {Local-ID: 43DB142DAE2CF97AC1257C69005B2D67-Kurz2013},
  publisher = {Hochschulbibliothekszentrum des Landes Nordrhein-Westfalen, K{\"o}ln (HBZ)},
  address   = {K{\"o}ln},
  year      = {2013},
  abstract  = {When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible.},
  journal   = {Journal of Virtual Reality and Broadcasting},
  volume    = {10},
  number    = {7},
  pages     = {1--13},
}
Endnote
%0 Journal Article %A Kurz, Christian %A Ritschel, Tobias %A Eisemann, Elmar %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generating Realistic Camera Shake for Virtual Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-196D-1 %F OTHER: Local-ID: 43DB142DAE2CF97AC1257C69005B2D67-Kurz2013 %U urn:nbn:de:0009-6-38335 %7 2013 %D 2013 %X When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible. 
%J Journal of Virtual Reality and Broadcasting %V 10 %N 7 %& 1 %P 1 - 13 %I Hochschulbibliothekszentrum des Landes Nordrhein-Westfalen, K&#246;ln (HBZ) %C K&#246;ln %@ false %U http://www.jvrb.org/past-issues/10.2013/3833/1020137.pdf
Klehm, O., Ihrke, I., Seidel, H.-P., and Eisemann, E. 2013. Volume Stylizer: Tomography-based Volume Painting. Proceedings I3D 2013, ACM.
Abstract
Volumetric phenomena are an integral part of standard rendering, yet, no suitable tools to edit characteristic properties are available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions to influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from this perspective. Via an analysis of the volumetric rendering equation, we can show how to link this problem to tomographic reconstruction.
Export
BibTeX
@inproceedings{i3dKlehm2013,
  title     = {Volume Stylizer: {Tomography-based} Volume Painting},
  author    = {Klehm, Oliver and Ihrke, Ivo and Seidel, Hans-Peter and Eisemann, Elmar},
  language  = {eng},
  isbn      = {978-1-4503-1956-0},
  doi       = {10.1145/2448196.2448222},
  localid   = {Local-ID: A0B42A95204F2B1EC1257B03005B313A-i3dKlehm2013},
  publisher = {ACM},
  year      = {2013},
  date      = {2013},
  abstract  = {Volumetric phenomena are an integral part of standard rendering, yet, no suitable tools to edit characteristic properties are available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions to influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from this perspective. Via an analysis of the volumetric rendering equation, we can show how to link this problem to tomographic reconstruction.},
  booktitle = {Proceedings I3D 2013},
  editor    = {Olano, Marc and Otaduy, Miguel A. and Meenakshisundaram, Gopi and Yoon, Sung-Eui and Spencer, Stephen N.},
  pages     = {161--168},
  address   = {Orlando, FL, USA},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Ihrke, Ivo %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Volume Stylizer: Tomography-based Volume Painting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3817-9 %R 10.1145/2448196.2448222 %F OTHER: Local-ID: A0B42A95204F2B1EC1257B03005B313A-i3dKlehm2013 %D 2013 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2013-03-21 - 2013-03-23 %C Orlando, FL, USA %X Volumetric phenomena are an integral part of standard rendering, yet, no <br>suitable tools to edit characteristic properties are available so far.<br>Either simulation results are used directly, or modifications are high-level, <br>e.g., noise functions to influence appearance. Intuitive artistic control is <br>not possible.<br><br>We propose a solution to stylize single-scattering volumetric effects. <br>Emission, scattering and extinction become amenable to artistic control while <br>preserving a smooth and coherent appearance when changing the viewpoint.<br>Our approach lets the user define a number of target views to be matched when <br>observing the volume from this perspective. Via an analysis of the volumetric <br>rendering equation, we can show how to link this problem to tomographic <br>reconstruction. %B Proceedings I3D 2013 %E Olano, Marc; Otaduy, Miguel A.; Meenakshisundaram, Gopi; Yoon, Sung-Eui; Spencer, Stephen N. %P 161 - 168 %I ACM %@ 978-1-4503-1956-0
Kerber, J., Bokeloh, M., Wand, M., and Seidel, H.-P. 2013. Scalable Symmetry Detection for Urban Scenes. Computer Graphics Forum32, 1.
Export
BibTeX
@article{Kerber2013_1,
  title     = {Scalable Symmetry Detection for Urban Scenes},
  author    = {Kerber, Jens and Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03226.x},
  localid   = {Local-ID: FC00BBDD131C5BC2C1257AED003BCDC9-Kerber2013_1},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  journal   = {Computer Graphics Forum},
  volume    = {32},
  number    = {1},
  pages     = {3--15},
}
Endnote
%0 Journal Article %A Kerber, Jens %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Symmetry Detection for Urban Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-17F3-0 %R 10.1111/j.1467-8659.2012.03226.x %F OTHER: Local-ID: FC00BBDD131C5BC2C1257AED003BCDC9-Kerber2013_1 %7 2012-10-09 %D 2013 %J Computer Graphics Forum %V 32 %N 1 %& 3 %P 3 - 15 %I Wiley-Blackwell %C Oxford, UK
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2013. Optimizing Disparity for Motion in Depth. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2013)32, 4.
Abstract
Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.
Export
BibTeX
@article{Kellnhofer2013,
  title     = {Optimizing Disparity for Motion in Depth},
  author    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12160},
  localid   = {Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  abstract  = {Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.},
  journal   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  volume    = {32},
  number    = {4},
  pages     = {143--152},
  booktitle = {Eurographics Symposium on Rendering 2013},
  editor    = {Holzschuch, N. and Rusinkiewicz, S.},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimizing Disparity for Motion in Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3D13-B %R 10.1111/cgf.12160 %F OTHER: Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013 %7 2013 %D 2013 %X Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion. %J Computer Graphics Forum %V 32 %N 4 %& 143 %P 143 - 152 %I Wiley-Blackwell %C Oxford, UK %@ false %B Eurographics Symposium on Rendering 2013 %O EGSR 2013 Eurographics Symposium on Rendering 2013 Zaragoza, 19 - 21 June, 2013
Helten, T., Müller, M., Seidel, H.-P., and Theobalt, C. 2013a. Real-time Body Tracking with One Depth Camera and Inertial Sensors. ICCV 2013, IEEE International Conference on Computer Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{HeltenICCV13,
  title     = {Real-time Body Tracking with One Depth Camera and Inertial Sensors},
  author    = {Helten, Thomas and M{\"u}ller, Meinard and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {1550-5499},
  isbn      = {978-1-4799-2839-2},
  doi       = {10.1109/ICCV.2013.141},
  publisher = {IEEE Computer Society},
  year      = {2013},
  date      = {2013},
  booktitle = {ICCV 2013, IEEE International Conference on Computer Vision},
  pages     = {1105--1112},
  address   = {Sydney, Australia},
}
Endnote
%0 Conference Proceedings %A Helten, Thomas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Body Tracking with One Depth Camera and Inertial Sensors : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-3416-E %R 10.1109/ICCV.2013.141 %D 2013 %B IEEE International Conference on Computer Vision %Z date of event: 2013-12-01 - 2013-12-08 %C Sydney, Australia %B ICCV 2013 %P 1105 - 1112 %I IEEE Computer Society %@ false
Helten, T., Baak, A., Bharaj, G., Müller, M., Seidel, H.-P., and Theobalt, C. 2013b. Personalization and Evaluation of a Real-time Depth-based Full Body Tracker. 3DV 2013, International Conference on 3D Vision, IEEE Computer Society.
Abstract
Reconstructing a three-dimensional representation of human motion in real-time constitutes an important research topic with applications in sports sciences, human-computer-interaction, and the movie industry. In this paper, we contribute with a robust algorithm for estimating a personalized human body model from just two sequentially captured depth images that is more accurate and runs an order of magnitude faster than the current state-of-the-art procedure. Then, we employ the estimated body model to track the pose in real-time from a stream of depth images using a tracking algorithm that combines local pose optimization and a stabilizing database look-up. Together, this enables accurate pose tracking that is more accurate than previous approaches. As a further contribution, we evaluate and compare our algorithm to previous work on a comprehensive benchmark dataset containing more than 15 minutes of challenging motions. This dataset comprises calibrated marker-based motion capture data, depth data, as well as ground truth tracking results and is publicly available for research purposes.
Export
BibTeX
@inproceedings{HeltenBBMST13_PersonalizedDepthTracker_3DV,
  title     = {Personalization and Evaluation of a Real-time Depth-based Full Body Tracker},
  author    = {Helten, Thomas and Baak, Andreas and Bharaj, Gaurav and M{\"u}ller, Meinard and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-0-7695-5067-1},
  doi       = {10.1109/3DV.2013.44},
  localid   = {Local-ID: E6F32B3A0B6E280EC1257C580033927E-HeltenBBMST13_PersonalizedDepthTracker_3DV},
  publisher = {IEEE Computer Society},
  year      = {2013},
  date      = {2013},
  abstract  = {Reconstructing a three-dimensional representation of human motion in real-time constitutes an important research topic with applications in sports sciences, human-computer-interaction, and the movie industry. In this paper, we contribute with a robust algorithm for estimating a personalized human body model from just two sequentially captured depth images that is more accurate and runs an order of magnitude faster than the current state-of-the-art procedure. Then, we employ the estimated body model to track the pose in real-time from a stream of depth images using a tracking algorithm that combines local pose optimization and a stabilizing database look-up. Together, this enables accurate pose tracking that is more accurate than previous approaches. As a further contribution, we evaluate and compare our algorithm to previous work on a comprehensive benchmark dataset containing more than 15 minutes of challenging motions. This dataset comprises calibrated marker-based motion capture data, depth data, as well as ground truth tracking results and is publicly available for research purposes.},
  booktitle = {3DV 2013, International Conference on 3D Vision},
  editor    = {Guerrero, Juan E.},
  pages     = {279--286},
  address   = {Seattle, WA, USA},
}
Endnote
%0 Conference Proceedings %A Helten, Thomas %A Baak, Andreas %A Bharaj, Gaurav %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Personalization and Evaluation of a Real-time Depth-based Full Body Tracker : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3998-0 %R 10.1109/3DV.2013.44 %F OTHER: Local-ID: E6F32B3A0B6E280EC1257C580033927E-HeltenBBMST13_PersonalizedDepthTracker_3DV %D 2013 %B International Conference on 3D Vision %Z date of event: 2013-06-29 - 2013-07-01 %C Seattle, WA, USA %X Reconstructing a three-dimensional representation of human motion in real-time constitutes an important research topic with applications in sports sciences, human-computer-interaction, and the movie industry. In this paper, we contribute with a robust algorithm for estimating a personalized human body model from just two sequentially captured depth images that is more accurate and runs an order of magnitude faster than the current state-of-the-art procedure. Then, we employ the estimated body model to track the pose in real-time from a stream of depth images using a tracking algorithm that combines local pose optimization and a stabilizing database look-up. Together, this enables accurate pose tracking that is more accurate than previous approaches. As a further contribution, we evaluate and compare our algorithm to previous work on a comprehensive benchmark dataset containing more than 15 minutes of challenging motions. This dataset comprises calibrated marker-based motion capture data, depth data, as well as ground truth tracking results and is publicly available for research purposes. %B 3DV 2013 %E Guerrero, Juan E. 
%P 279 - 286 %I IEEE Computer Society %@ 978-0-7695-5067-1
Elek, O., Ritschel, T., and Seidel, H.-P. 2013. Real-time Screen-space Scattering in Homogeneous Environments. IEEE Computer Graphics and Applications 33, 3.
Abstract
This work presents an approximate algorithm for computing light scattering within homogeneous participating environments in screen space. Instead of simulating the full global illumination in participating media we model the scattering process by a physically-based point spread function. To do this efficiently we apply the point spread function by performing a discrete hierarchical convolution in a texture MIP map. We solve the main problem of this approach, illumination leaking, by designing a custom anisotropic incremental filter. Our solution is fully parallel, runs in hundreds of frames-per-second for usual screen resolutions and is directly applicable in most existing 2D or 3D rendering architectures.
Export
BibTeX
@article{Elek2013a,
  title     = {Real-time Screen-space Scattering in Homogeneous Environments},
  author    = {Elek, Oskar and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0272-1716},
  doi       = {10.1109/MCG.2013.17},
  localid   = {Local-ID: 2CEB4CE37F3F3733C1257B030043502E-Elek2013a},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2013},
  date      = {2013},
  abstract  = {This work presents an approximate algorithm for computing light scattering within homogeneous participating environments in screen space. Instead of simulating the full global illumination in participating media we model the scattering process by a physically-based point spread function. To do this efficiently we apply the point spread function by performing a discrete hierarchical convolution in a texture MIP map. We solve the main problem of this approach, illumination leaking, by designing a custom anisotropic incremental filter. Our solution is fully parallel, runs in hundreds of frames-per-second for usual screen resolutions and is directly applicable in most existing 2D or 3D rendering architectures.},
  journal   = {IEEE Computer Graphics and Applications},
  volume    = {33},
  number    = {3},
  pages     = {53--65},
}
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Screen-space Scattering in Homogeneous Environments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3812-4 %R 10.1109/MCG.2013.17 %F OTHER: Local-ID: 2CEB4CE37F3F3733C1257B030043502E-Elek2013a %7 2013 %D 2013 %X This work presents an approximate algorithm for computing light scattering within homogeneous participating environments in screen space. Instead of simulating the full global illumination in participating media we model the scattering process by a physically-based point spread function. To do this efficiently we apply the point spread function by performing a discrete hierarchical convolution in a texture MIP map. We solve the main problem of this approach, illumination leaking, by designing a custom anisotropic incremental filter. Our solution is fully parallel, runs in hundreds of frames-per-second for usual screen resolutions and is directly applicable in most existing 2D or 3D rendering architectures. %J IEEE Computer Graphics and Applications %V 33 %N 3 %& 53 %P 53 - 65 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Čadík, M., Herzog, R., Mantiuk, R., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2013. Learning to Predict Localized Distortions in Rendered Images. Computer Graphics Forum (Proc. Pacific Graphics 2013) 32, 7.
Export
BibTeX
@article{CadikPG2013,
  title     = {Learning to Predict Localized Distortions in Rendered Images},
  author    = {{\v C}ad{\'i}k, Martin and Herzog, Robert and Mantiuk, Rafa{\l} and Mantiuk, Rados{\l}aw and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/cgf.12248},
  publisher = {Wiley-Blackwell},
  address   = {Oxford},
  year      = {2013},
  date      = {2013},
  journal   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  volume    = {32},
  number    = {7},
  pages     = {401--410},
  booktitle = {21st Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2013)},
}
Endnote
%0 Journal Article %A &#268;ad&#237;k, Martin %A Herzog, Robert %A Mantiuk, Rafa&#322; %A Mantiuk, Rados&#322;aw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning to Predict Localized Distortions in Rendered Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF9-E %R 10.1111/cgf.12248 %7 2014-11-25 %D 2013 %J Computer Graphics Forum %V 32 %N 7 %& 401 %P 401 - 410 %I Wiley-Blackwell %C Oxford %B 21st Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2013 PG 2013 October 7-9, 2013, Singapore
Burghard, O., Berner, A., Wand, M., Mitra, N.J., Seidel, H.-P., and Klein, R. 2013. Compact Part-based Shape Spaces for Dense Correspondences. CoRR abs/1311.7535.
Export
BibTeX
@article{DBLP:journals/corr/BurghardBWMSK13,
  title     = {Compact Part-based Shape Spaces for Dense Correspondences},
  author    = {Burghard, Oliver and Berner, Alexander and Wand, Michael and Mitra, Niloy J. and Seidel, Hans-Peter and Klein, Reinhard},
  language  = {eng},
  doi       = {10.48550/arXiv.1311.7535},
  publisher = {Bonn University},
  address   = {Bonn},
  year      = {2013},
  date      = {2013},
  journal   = {CoRR},
  volume    = {abs/1311.7535},
  pages     = {1--23},
}
Endnote
%0 Journal Article %A Burghard, Oliver %A Berner, Alexander %A Wand, Michael %A Mitra, Niloy J. %A Seidel, Hans-Peter %A Klein, Reinhard %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Compact Part-based Shape Spaces for Dense Correspondences : %G eng %U http://hdl.handle.net/21.11116/0000-000F-6BA2-2 %R 10.48550/arXiv.1311.7535 %D 2013 %J CoRR %V abs/1311.7535 %& 1 %P 1 - 23 %I Bonn University %C Bonn
Baak, A., Müller, M., Bharaj, G., Seidel, H.-P., and Theobalt, C. 2013. A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera. In: Consumer Depth Cameras for Computer Vision. Springer, London.
Export
BibTeX
@incollection{BaakMuBhSeTh12_DataDrivenDepthTracking_BookChapter,
  title     = {A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera},
  author    = {Baak, Andreas and M{\"u}ller, Meinard and Bharaj, Gaurav and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4471-4639-1; 978-1-4471-4640-7},
  doi       = {10.1007/978-1-4471-4640-7_5},
  publisher = {Springer},
  address   = {London},
  year      = {2013},
  date      = {2013},
  booktitle = {Consumer Depth Cameras for Computer Vision},
  editor    = {Fossati, Andrea and Gall, Juergen and Grabner, Helmut and Ren, Xiaofeng and Konolige, Kurt},
  pages     = {71--98},
  series    = {Advances in Computer Vision and Pattern Recognition},
}
Endnote
%0 Book Section %A Baak, Andreas %A M&#252;ller, Meinard %A Bharaj, Gaurav %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13D1-F %R 10.1007/978-1-4471-4640-7_5 %D 2013 %B Consumer Depth Cameras for Computer Vision %E Fossati, Andrea; Gall, Juergen; Grabner, Helmut; Ren, Xiaofeng; Konolige, Kurt %P 71 - 98 %I Springer %C London %@ 978-1-4471-4639-1 978-1-4471-4640-7 %S Advances in Computer Vision and Pattern Recognition %U https://rdcu.be/dKNC3
2012
Yang, Y., Günther, D., Wuhrer, S., et al. 2012. Correspondences of Persistent Feature Points on Near-isometric Surfaces. Computer Vision - ECCV 2012, Springer.
Export
BibTeX
@inproceedings{yang12,
  title     = {Correspondences of Persistent Feature Points on Near-isometric Surfaces},
  author    = {Yang, Ying and G{\"u}nther, David and Wuhrer, Stefanie and Brunton, Alan and Ivrissimtzis, Ioannis and Seidel, Hans-Peter and Weinkauf, Tino},
  language  = {eng},
  issn      = {0302-9743; 1611-3349},
  isbn      = {978-3-642-33862-5; 978-3-642-33863-2},
  doi       = {10.1007/978-3-642-33863-2_11},
  localid   = {Local-ID: 2862002BA9203D8AC1257AD80048C3AC-yang12},
  publisher = {Springer},
  year      = {2012},
  date      = {2012},
  booktitle = {Computer Vision -- ECCV 2012},
  editor    = {Fusiello, Andrea and Murino, Vittorio and Cucchiara, Rita},
  pages     = {102--112},
  series    = {Lecture Notes in Computer Science},
  volume    = {7583},
  address   = {Florence, Italy},
}
Endnote
%0 Conference Proceedings %A Yang, Ying %A G&#252;nther, David %A Wuhrer, Stefanie %A Brunton, Alan %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Correspondences of Persistent Feature Points on Near-isometric Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0DFB-2 %F OTHER: Local-ID: 2862002BA9203D8AC1257AD80048C3AC-yang12 %R 10.1007/978-3-642-33863-2_11 %D 2012 %B 12th European Conference on Computer Vision %Z date of event: 2012-10-07 - 2012-10-13 %C Florence, Italy %B Computer Vision - ECCV 2012 %E Fusiello, Andrea; Murino, Vittorio; Cucchiara, Rita %P 102 - 112 %I Springer %@ 978-3-642-33862-5 978-3-642-33863-2 %B Lecture Notes in Computer Science %N 7583 %@ false %U https://rdcu.be/dJK66
Valgaerts, L., Wu, C., Bruhn, A., Seidel, H.-P., and Theobalt, C. 2012. Lightweight Binocular Facial Performance Capture under Uncontrolled Lighting. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Abstract
Recent progress in passive facial performance capture has shown impressively detailed results on highly articulated motion. However, most methods rely on complex multi-camera set-ups, controlled lighting or fiducial markers. This prevents them from being used in general environments, outdoor scenes, during live action on a film set, or by freelance animators and everyday users who want to capture their digital selves. In this paper, we therefore propose a lightweight passive facial performance capture approach that is able to reconstruct high-quality dynamic facial geometry from only a single pair of stereo cameras. Our method succeeds under uncontrolled and time-varying lighting, and also in outdoor scenes. Our approach builds upon and extends recent image-based scene flow computation, lighting estimation and shading-based refinement algorithms. It integrates them into a pipeline that is specifically tailored towards facial performance reconstruction from challenging binocular footage under uncontrolled lighting. In an experimental evaluation, the strong capabilities of our method become explicit: We achieve detailed and spatio-temporally coherent results for expressive facial motion in both indoor and outdoor scenes -- even from low quality input images recorded with a hand-held consumer stereo camera. We believe that our approach is the first to capture facial performances of such high quality from a single stereo rig and we demonstrate that it brings facial performance capture out of the studio, into the wild, and within the reach of everybody.
Export
BibTeX
@article{Valgaerts2012,
  title     = {Lightweight Binocular Facial Performance Capture under Uncontrolled Lighting},
  author    = {Valgaerts, Levi and Wu, Chenglei and Bruhn, Andr{\'e}s and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2366145.2366206},
  localid   = {Local-ID: C52293511BC90BA6C1257AD60059643C-Valgaerts2012},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  abstract  = {Recent progress in passive facial performance capture has shown impressively detailed results on highly articulated motion. However, most methods rely on complex multi-camera set-ups, controlled lighting or fiducial markers. This prevents them from being used in general environments, outdoor scenes, during live action on a film set, or by freelance animators and everyday users who want to capture their digital selves. In this paper, we therefore propose a lightweight passive facial performance capture approach that is able to reconstruct high-quality dynamic facial geometry from only a single pair of stereo cameras. Our method succeeds under uncontrolled and time-varying lighting, and also in outdoor scenes. Our approach builds upon and extends recent image-based scene flow computation, lighting estimation and shading-based refinement algorithms. It integrates them into a pipeline that is specifically tailored towards facial performance reconstruction from challenging binocular footage under uncontrolled lighting. In an experimental evaluation, the strong capabilities of our method become explicit: We achieve detailed and spatio-temporally coherent results for expressive facial motion in both indoor and outdoor scenes -- even from low quality input images recorded with a hand-held consumer stereo camera. 
We believe that our approach is the first to capture facial performances of such high quality from a single stereo rig and we demonstrate that it brings facial performance capture out of the studio, into the wild, and within the reach of everybody.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {31},
  number    = {6},
  pages     = {1--11},
  eid       = {187},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A Valgaerts, Levi %A Wu, Chenglei %A Bruhn, Andr&#233;s %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lightweight Binocular Facial Performance Capture under Uncontrolled Lighting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1626-6 %F OTHER: Local-ID: C52293511BC90BA6C1257AD60059643C-Valgaerts2012 %R 10.1145/2366145.2366206 %7 2012-11-01 %D 2012 %X Recent progress in passive facial performance capture has shown impressively detailed results on highly articulated motion. However, most methods rely on complex multi-camera set-ups, controlled lighting or fiducial markers. This prevents them from being used in general environments, outdoor scenes, during live action on a film set, or by freelance animators and everyday users who want to capture their digital selves. In this paper, we therefore propose a lightweight passive facial performance capture approach that is able to reconstruct high-quality dynamic facial geometry from only a single pair of stereo cameras. Our method succeeds under uncontrolled and time-varying lighting, and also in outdoor scenes. Our approach builds upon and extends recent image-based scene flow computation, lighting estimation and shading-based refinement algorithms. It integrates them into a pipeline that is specifically tailored towards facial performance reconstruction from challenging binocular footage under uncontrolled lighting. In an experimental evaluation, the strong capabilities of our method become explicit: We achieve detailed and spatio-temporally coherent results for expressive facial motion in both indoor and outdoor scenes -- even from low quality input images recorded with a hand-held consumer stereo camera. 
We believe that our approach is the first to capture facial performances of such high quality from a single stereo rig and we demonstrate that it brings facial performance capture out of the studio, into the wild, and within the reach of everybody. %J ACM Transactions on Graphics %V 31 %N 6 %& 1 %P 1 - 11 %Z sequence number: 187 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December
Tevs, A., Berner, A., Wand, M., et al. 2012. Animation Cartography - Intrinsic Reconstruction of Shape and Motion. ACM Transactions on Graphics 31, 2.
Abstract
In this paper, we consider the problem of animation reconstruction, i.e., the reconstruction of shape and motion of a deformable object from dynamic 3D scanner data, without using user provided template models. Unlike previous work that addressed this problem, we do not rely on locally convergent optimization but present a system that can handle fast motion, temporally disrupted input, and can correctly match objects that disappear for extended time periods in acquisition holes due to occlusion. Our approach is motivated by cartography: We first estimate a few landmark correspondences, which are extended to a dense matching and then used to reconstruct geometry and motion. We propose a number of algorithmic building blocks: a scheme for tracking landmarks in temporally coherent and incoherent data, an algorithm for robust estimation of dense correspondences under topological noise, and the integration of local matching techniques to refine the result. We describe and evaluate the individual components and propose a complete animation reconstruction pipeline based on these ideas. We evaluate our method on a number of standard benchmark data sets and show that we can obtain correct reconstructions in situations where other techniques fail completely or require additional user guidance such as a template model.
Export
BibTeX
@article{TevsTog2012,
  title     = {Animation Cartography -- Intrinsic Reconstruction of Shape and Motion},
  author    = {Tevs, Art and Berner, Alexander and Wand, Michael and Ihrke, Ivo and Bokeloh, Martin and Kerber, Jens and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2159516.2159517},
  localid   = {Local-ID: F830F7C449A5797BC12579CD0040CC2F-TevsTog2012},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  abstract  = {In this paper, we consider the problem of animation reconstruction, i.e., the reconstruction of shape and motion of a deformable object from dynamic 3D scanner data, without using user provided template models. Unlike previous work that addressed this problem, we do not rely on locally convergent optimization but present a system that can handle fast motion, temporally disrupted input, and can correctly match objects that disappear for extended time periods in acquisition holes due to occlusion. Our approach is motivated by cartography: We first estimate a few landmark correspondences, which are extended to a dense matching and then used to reconstruct geometry and motion. We propose a number of algorithmic building blocks: a scheme for tracking landmarks in temporally coherent and incoherent data, an algorithm for robust estimation of dense correspondences under topological noise, and the integration of local matching techniques to refine the result. We describe and evaluate the individual components and propose a complete animation reconstruction pipeline based on these ideas. We evaluate our method on a number of standard benchmark data sets and show that we can obtain correct reconstructions in situations where other techniques fail completely or require additional user guidance such as a template model.},
  journal   = {ACM Transactions on Graphics},
  volume    = {31},
  number    = {2},
  pages     = {1--15},
}
Endnote
%0 Journal Article %A Tevs, Art %A Berner, Alexander %A Wand, Michael %A Ihrke, Ivo %A Bokeloh, Martin %A Kerber, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Animation Cartography - Intrinsic Reconstruction of Shape and Motion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-161E-9 %F OTHER: Local-ID: F830F7C449A5797BC12579CD0040CC2F-TevsTog2012 %R 10.1145/2159516.2159517 %7 2012-04-01 %D 2012 %X In this paper, we consider the problem of animation reconstruction, i.e., the reconstruction of shape and motion of a deformable object from dynamic 3D scanner data, without using user provided template models. Unlike previous work that addressed this problem, we do not rely on locally convergent optimization but present a system that can handle fast motion, temporally disrupted input, and can correctly match objects that disappear for extended time periods in acquisition holes due to occlusion. Our approach is motivated by cartography: We first estimate a few landmark correspondences, which are extended to a dense matching and then used to reconstruct geometry and motion. We propose a number of algorithmic building blocks: a scheme for tracking landmarks in temporally coherent and incoherent data, an algorithm for robust estimation of dense correspondences under topological noise, and the integration of local matching techniques to refine the result. We describe and evaluate the individual components and propose a complete animation reconstruction pipeline based on these ideas. 
We evaluate our method on a number of standard benchmark data sets and show that we can obtain correct reconstructions in situations where other techniques fail completely or require additional user guidance such as a template model. %J ACM Transactions on Graphics %V 31 %N 2 %& 1 %P 1 - 15 %I ACM %C New York, NY %@ false
Templin, K., Didyk, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2012. Highlight Microdisparity for Improved Gloss Depiction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2012) 31, 4.
Abstract
Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the ``highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control.
Export
BibTeX
@article{Templin2012,
  title     = {Highlight Microdisparity for Improved Gloss Depiction},
  author    = {Templin, Krzysztof and Didyk, Piotr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2185520.2185588},
  localid   = {Local-ID: BDB99D9DBF6B290EC1257A4500551595-Templin2012},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  abstract  = {Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the ``highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {31},
  number    = {4},
  pages     = {1--5},
  eid       = {92},
  booktitle = {Proceedings of ACM SIGGRAPH 2012},
}
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Highlight Microdisparity for Improved Gloss Depiction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1617-8 %F OTHER: Local-ID: BDB99D9DBF6B290EC1257A4500551595-Templin2012 %R 10.1145/2185520.2185588 %7 2012-07-01 %D 2012 %X Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the ``highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control. %J ACM Transactions on Graphics %V 31 %N 4 %& 1 %P 1 - 5 %Z sequence number: 92 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2012 %O ACM SIGGRAPH 2012 Los Angeles, California, 5 - 9 August 2012
Stöter, T., Weinkauf, T., Seidel, H.-P., and Theisel, H. 2012. Implicit Integral Surfaces. VMV 2012 Vision, Modeling & Visualization, Eurographics Association.
Abstract
We present an implicit method for globally computing all four classic types of integral surfaces -- stream, path, streak, and time surfaces in 3D time-dependent vector fields. Our novel formulation is based on the representation of a time surface as implicit isosurface of a 3D scalar function advected by the flow field. The evolution of a time surface is then given as an isovolume in 4D space-time spanned by a series of advected scalar functions. Based on this, the other three integral surfaces are described as the intersection of two isovolumes derived from different scalar functions. Our method uses a dense flow integration to compute integral surfaces globally in the entire domain. This allows to change the seeding structure efficiently by simply defining new isovalues. We propose two rendering methods that exploit the implicit nature of our integral surfaces: 4D raycasting, and projection into a 3D volume. Furthermore, we present a marching cubes inspired surface extraction method to convert the implicit surface representation to an explicit triangle mesh. In contrast to previous approaches for implicit stream surfaces, our method allows for multiple voxel intersections, covers all regions of the flow field, and provides full control over the seeding line within the entire domain.
Export
BibTeX
@inproceedings{stoeter12,
  title     = {Implicit Integral Surfaces},
  author    = {St{\"o}ter, Torsten and Weinkauf, Tino and Seidel, Hans-Peter and Theisel, Holger},
  language  = {eng},
  isbn      = {978-3-905673-95-1},
  doi       = {10.2312/PE/VMV/VMV12/127-134},
  localid   = {Local-ID: CE2200B8F8C0B666C1257AD8003ECF58-stoeter12},
  publisher = {Eurographics Association},
  year      = {2012},
  date      = {2012},
  abstract  = {We present an implicit method for globally computing all four classic types of integral surfaces -- stream, path, streak, and time surfaces in 3D time-dependent vector fields. Our novel formulation is based on the representation of a time surface as implicit isosurface of a 3D scalar function advected by the flow field. The evolution of a time surface is then given as an isovolume in 4D space-time spanned by a series of advected scalar functions. Based on this, the other three integral surfaces are described as the intersection of two isovolumes derived from different scalar functions. Our method uses a dense flow integration to compute integral surfaces globally in the entire domain. This allows to change the seeding structure efficiently by simply defining new isovalues. We propose two rendering methods that exploit the implicit nature of our integral surfaces: 4D raycasting, and projection into a 3D volume. Furthermore, we present a marching cubes inspired surface extraction method to convert the implicit surface representation to an explicit triangle mesh. In contrast to previous approaches for implicit stream surfaces, our method allows for multiple voxel intersections, covers all regions of the flow field, and provides full control over the seeding line within the entire domain.},
  booktitle = {VMV 2012 Vision, Modeling \& Visualization},
  editor    = {G{\"o}sele, Michael and Grosch, Thorsten and Theisel, Holger and Toennies, Klaus and Preim, Bernhard},
  pages     = {127--134},
  address   = {Magdeburg, Germany},
}
Endnote
%0 Conference Proceedings %A St&#246;ter, Torsten %A Weinkauf, Tino %A Seidel, Hans-Peter %A Theisel, Holger %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Implicit Integral Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0E9C-E %F OTHER: Local-ID: CE2200B8F8C0B666C1257AD8003ECF58-stoeter12 %R 10.2312/PE/VMV/VMV12/127-134 %D 2012 %B 17th Annual Workshop on Vision, Modeling and Visualization %Z date of event: 2012-11-12 - 2012-11-14 %C Magdeburg, Germany %X We present an implicit method for globally computing all four classic types of integral surfaces -- stream, path, streak, and time surfaces in 3D time-dependent vector fields. Our novel formulation is based on the representation of a time surface as implicit isosurface of a 3D scalar function advected by the flow field. The evolution of a time surface is then given as an isovolume in 4D space-time spanned by a series of advected scalar functions. Based on this, the other three integral surfaces are described as the intersection of two isovolumes derived from different scalar functions. Our method uses a dense flow integration to compute integral surfaces globally in the entire domain. This allows to change the seeding structure efficiently by simply defining new isovalues. We propose two rendering methods that exploit the implicit nature of our integral surfaces: 4D raycasting, and projection into a 3D volume. Furthermore, we present a marching cubes inspired surface extraction method to convert the implicit surface representation to an explicit triangle mesh. In contrast to previous approaches for implicit stream surfaces, our method allows for multiple voxel intersections, covers all regions of the flow field, and provides full control over the seeding line within the entire domain. 
%B VMV 2012 Vision, Modeling & Visualization %E G&#246;sele, Michael; Grosch, Thorsten; Theisel, Holger; Toennies, Klaus; Preim, Bernhard %P 127 - 134 %I Eurographics Association %@ 978-3-905673-95-1
Scherzer, D., Nguyen, C., Ritschel, T., and Seidel, H.-P. 2012. Pre-convolved Radiance Caching. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2012)31, 4.
Export
BibTeX
@article{Scherzer2012PcRC,
  title     = {Pre-convolved Radiance Caching},
  author    = {Scherzer, Daniel and Nguyen, Chuong and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03134.x},
  localid   = {Local-ID: 983F2B7BB8314818C1257AD800380D3C-Scherzer2012PcRC},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  journal   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  volume    = {31},
  number    = {4},
  pages     = {1391--1397},
  booktitle = {Eurographics Symposium on Rendering 2012},
  editor    = {Durand, Fredo and Gutierrez, Diego},
}
Endnote
%0 Journal Article %A Scherzer, Daniel %A Nguyen, Chuong %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Pre-convolved Radiance Caching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1210-B %F OTHER: Local-ID: 983F2B7BB8314818C1257AD800380D3C-Scherzer2012PcRC %R 10.1111/j.1467-8659.2012.03134.x %7 2012-07-04 %D 2012 %J Computer Graphics Forum %V 31 %N 4 %& 1391 %P 1391 - 1397 %I Wiley-Blackwell %C Oxford, UK %@ false %B Eurographics Symposium on Rendering 2012 %O Paris, France, June 27th - 29th, 2012 SR 2012 EGSR 2012 Eurographics Symposium on Rendering 2012
Ritschel, T., Templin, K., Myszkowski, K., and Seidel, H.-P. 2012. Virtual Passepartouts. Non-Photorealistic Animation and Rendering (NPAR 2012), Eurographics Association.
Abstract
In traditional media, such as photography and painting, a cardboard sheet with a cutout (called “passepartout”) is frequently placed on top of an image. One of its functions is to increase the depth impression via the “looking-through-a-window” metaphor. This paper shows how an improved 3D effect can be achieved by using a virtual passepartout: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts.
Export
BibTeX
@inproceedings{RitschelTMS2012,
  title     = {Virtual Passepartouts},
  author    = {Ritschel, Tobias and Templin, Krzysztof and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-3-905673-90-6},
  doi       = {10.2312/PE/NPAR/NPAR12/057-063},
  localid   = {Local-ID: AF8C88CA4485E3B1C1257A4500606C5D-RitschelTMS2012},
  publisher = {Eurographics Association},
  year      = {2012},
  date      = {2012},
  abstract  = {In traditional media, such as photography and painting, a cardboard sheet with a cutout (called \emph{passepartout}) is frequently placed on top of an image. One of its functions is to increase the depth impression via the ``looking-through-a-window'' metaphor. This paper shows how an improved 3D~effect can be achieved by using a \emph{virtual passepartout}: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts.},
  booktitle = {Non-Photorealistic Animation and Rendering (NPAR 2012)},
  editor    = {Asente, Paul and Grimm, Cindy},
  pages     = {57--63},
  address   = {Annecy, France},
}
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Templin, Krzysztof %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Virtual Passepartouts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13D3-B %R 10.2312/PE/NPAR/NPAR12/057-063 %F OTHER: Local-ID: AF8C88CA4485E3B1C1257A4500606C5D-RitschelTMS2012 %D 2012 %B Non-Photorealistic Animation and Rendering 2012 %Z date of event: 2012-06-04 - 2012-06-06 %C Annecy, France %X In traditional media, such as photography and painting, a cardboard sheet with a cutout (called \emphpassepartout}) is frequently placed on top of an image. One of its functions is to increase the depth impression via the ``looking-through-a-window'' metaphor. This paper shows how an improved 3D~effect can be achieved by using a \emph{virtual passepartout: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts. %B Non-Photorealistic Animation and Rendering %E Asente, Paul; Grimm, Cindy %P 57 - 63 %I Eurographics Association %@ 978-3-905673-90-6
Richardt, C., Stoll, C., Dodgson, N.A., Seidel, H.-P., and Theobalt, C. 2012. Coherent Spatiotemporal Filtering, Upsampling and Rendering of RGBZ Videos. Computer Graphics Forum (Proc. EUROGRAPHICS 2012)31, 2.
Abstract
Sophisticated video processing effects require both image and geometry information. We explore the possibility to augment a video camera with a recent infrared time-of-flight depth camera, to capture high-resolution RGB and low-resolution, noisy depth at video frame rates. To turn such a setup into a practical RGBZ video camera, we develop efficient data filtering techniques that are tailored to the noise characteristics of IR depth cameras. We first remove typical artefacts in the RGBZ data and then apply an efficient spatiotemporal denoising and upsampling scheme. This allows us to record temporally coherent RGBZ videos at interactive frame rates and to use them to render a variety of effects in unprecedented quality. We show effects such as video relighting, geometry-based abstraction and stylisation, background segmentation and rendering in stereoscopic 3D.
Export
BibTeX
@article{Richardt2012,
  title     = {Coherent Spatiotemporal Filtering, Upsampling and Rendering of {RGBZ} Videos},
  author    = {Richardt, Christian and Stoll, Carsten and Dodgson, Neil A. and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03003.x},
  localid   = {Local-ID: 53A56E45860AC25EC1257AD70038C3F8-Richardt2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  abstract  = {Sophisticated video processing effects require both image and geometry information. We explore the possibility to augment a video camera with a recent infrared time-of-flight depth camera, to capture high-resolution RGB and low-resolution, noisy depth at video frame rates. To turn such a setup into a practical RGBZ video camera, we develop efficient data filtering techniques that are tailored to the noise characteristics of IR depth cameras. We first remove typical artefacts in the RGBZ data and then apply an efficient spatiotemporal denoising and upsampling scheme. This allows us to record temporally coherent RGBZ videos at interactive frame rates and to use them to render a variety of effects in unprecedented quality. We show effects such as video relighting, geometry-based abstraction and stylisation, background segmentation and rendering in stereoscopic 3D.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {247--256},
  booktitle = {EUROGRAPHICS 2012},
}
Endnote
%0 Journal Article %A Richardt, Christian %A Stoll, Carsten %A Dodgson, Neil A. %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Coherent Spatiotemporal Filtering, Upsampling and Rendering of RGBZ Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-15DF-5 %F OTHER: Local-ID: 53A56E45860AC25EC1257AD70038C3F8-Richardt2012 %R 10.1111/j.1467-8659.2012.03003.x %7 2012-06-07 %D 2012 %X Sophisticated video processing effects require both image and geometry information. We explore the possibility to augment a video camera with a recent infrared time-of-flight depth camera, to capture high-resolution RGB and low-resolution, noisy depth at video frame rates. To turn such a setup into a practical RGBZ video camera, we develop efficient data filtering techniques that are tailored to the noise characteristics of IR depth cameras. We first remove typical artefacts in the RGBZ data and then apply an efficient spatiotemporal denoising and upsampling scheme. This allows us to record temporally coherent RGBZ videos at interactive frame rates and to use them to render a variety of effects in unprecedented quality. We show effects such as video relighting, geometry-based abstraction and stylisation, background segmentation and rendering in stereoscopic 3D. %J Computer Graphics Forum %V 31 %N 2 %& 247 %P 247 - 256 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EUROGRAPHICS 2012 EG 2012
Reuter, A., Seidel, H.-P., and Ihrke, I. 2012. BlurTags: Spatially Varying PSF Estimation with Out-of-Focus Patterns. 20th International Conference on Computer Graphics, Visualization and Computer Vision 2012 (WSCG 2012).
Export
BibTeX
@inproceedings{Reuter2012,
  title     = {{BlurTags}: {Spatially} Varying {PSF} Estimation with Out-of-Focus Patterns},
  author    = {Reuter, Alexander and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  isbn      = {978-80-86943-79-4},
  localid   = {Local-ID: 8C2D1525002E674EC1257AD7004B4A17-Reuter2012},
  year      = {2012},
  booktitle = {20th International Conference on Computer Graphics, Visualization and Computer Vision 2012 (WSCG 2012)},
  editor    = {Skala, Vaclav},
  pages     = {239--248},
  address   = {Plzen, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Reuter, Alexander %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T BlurTags: Spatially Varying PSF Estimation with Out-of-Focus Patterns : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-12F2-D %F OTHER: Local-ID: 8C2D1525002E674EC1257AD7004B4A17-Reuter2012 %D 2012 %B 20th International Conference on Computer Graphics, Visualization and Computer Vision %Z date of event: 2012-06-26 - 2012-06-28 %C Plzen, Czech Republic %B 20th International Conference on Computer Graphics, Visualization and Computer Vision 2012 %E Skala, Vaclav %P 239 - 248 %@ 978-80-86943-79-4 %U http://wscg.zcu.cz/wscg2012/short/E47-full.pdf
Reininghaus, J., Günther, D., Hotz, I., Weinkauf, T., and Seidel, H.-P. 2012. Combinatorial Gradient Fields for 2D Images with Empirically Convergent Separatrices. http://arxiv.org/abs/1208.6523.
(arXiv: 1208.6523)
Abstract
This paper proposes an efficient probabilistic method that computes combinatorial gradient fields for two dimensional image data. In contrast to existing algorithms, this approach yields a geometric Morse-Smale complex that converges almost surely to its continuous counterpart when the image resolution is increased. This approach is motivated using basic ideas from probability theory and builds upon an algorithm from discrete Morse theory with a strong mathematical foundation. While a formal proof is only hinted at, we do provide a thorough numerical evaluation of our method and compare it to established algorithms.
Export
BibTeX
@online{reininghaus12a,
  title      = {Combinatorial Gradient Fields for {2D} Images with Empirically Convergent Separatrices},
  author     = {Reininghaus, Jan and G{\"u}nther, David and Hotz, Ingrid and Weinkauf, Tino and Seidel, Hans-Peter},
  language   = {eng},
  url        = {http://arxiv.org/abs/1208.6523},
  eprint     = {1208.6523},
  eprinttype = {arXiv},
  localid    = {Local-ID: 9717F6A3BD0231CAC1257AD800438371-reininghaus12a},
  publisher  = {Cornell University Library},
  address    = {Ithaca, NY},
  year       = {2012},
  abstract   = {This paper proposes an efficient probabilistic method that computes combinatorial gradient fields for two dimensional image data. In contrast to existing algorithms, this approach yields a geometric Morse-Smale complex that converges almost surely to its continuous counterpart when the image resolution is increased. This approach is motivated using basic ideas from probability theory and builds upon an algorithm from discrete Morse theory with a strong mathematical foundation. While a formal proof is only hinted at, we do provide a thorough numerical evaluation of our method and compare it to established algorithms.},
}
Endnote
%0 Report %A Reininghaus, Jan %A G&#252;nther, David %A Hotz, Ingrid %A Weinkauf, Tino %A Seidel, Hans-Peter %+ Institute for Science and Technology Austria Computer Graphics, MPI for Informatics, Max Planck Society Institute for Science and Technology Austria Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combinatorial Gradient Fields for 2D Images with Empirically Convergent Separatrices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0E90-6 %U http://arxiv.org/abs/1208.6523 %F OTHER: Local-ID: 9717F6A3BD0231CAC1257AD800438371-reininghaus12a %I Cornell University Library %C Ithaca, NY %D 2012 %X This paper proposes an efficient probabilistic method that computes combinatorial gradient fields for two dimensional image data. In contrast to existing algorithms, this approach yields a geometric Morse-Smale complex that converges almost surely to its continuous counterpart when the image resolution is increased. This approach is motivated using basic ideas from probability theory and builds upon an algorithm from discrete Morse theory with a strong mathematical foundation. While a formal proof is only hinted at, we do provide a thorough numerical evaluation of our method and compare it to established algorithms. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Computational Geometry, cs.CG,Computer Science, Discrete Mathematics, cs.DM,
Reinert, B., Ritschel, T., and Seidel, H.-P. 2012. Homunculus Warping: Conveying Importance Using Self-intersection-free Non-homogeneous Mesh Deformation. Computer Graphics Forum (Proc. Pacific Graphics 2012)31, 7.
Export
BibTeX
@article{Reinert2012,
  title     = {Homunculus Warping: Conveying Importance Using Self-intersection-free Non-homogeneous Mesh Deformation},
  author    = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03209.x},
  localid   = {Local-ID: 23F7E3C2BBBCDA78C1257B01005F5059-Reinert2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  journal   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  volume    = {31},
  number    = {7},
  pages     = {2165--2171},
  booktitle = {Pacific Graphics 2012},
  editor    = {Bregler, Chris and Sander, Pedro and Wimmer, Michael},
}
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Homunculus Warping: Conveying Importance Using Self-intersection-free Non-homogeneous Mesh Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1542-1 %F OTHER: Local-ID: 23F7E3C2BBBCDA78C1257B01005F5059-Reinert2012 %R 10.1111/j.1467-8659.2012.03209.x %7 2012-10-02 %D 2012 %J Computer Graphics Forum %V 31 %N 7 %& 2165 %P 2165 - 2171 %I Wiley-Blackwell %C Oxford, UK %@ false %B Pacific Graphics 2012 %O The 20th Pacific Conference on Computer Graphics and Applications, September 12-14, 2012, Hong Kong Pacific Graphics 2012
Nguyen, C., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2012. 3D Material Style Transfer. Computer Graphics Forum (Proc. EUROGRAPHICS 2012)31, 2.
Abstract
This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study.
Export
BibTeX
@article{Nguyen2012z,
  title     = {{3D} Material Style Transfer},
  author    = {Nguyen, Chuong and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03022.x},
  localid   = {Local-ID: 3C190E59F48516AFC1257B0100644708-Nguyen2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  abstract  = {This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {431--438},
  booktitle = {EUROGRAPHICS 2012},
  editor    = {Cignoni, Paolo and Ertl, Thomas},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Material Style Transfer : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1537-C %F OTHER: Local-ID: 3C190E59F48516AFC1257B0100644708-Nguyen2012 %R 10.1111/j.1467-8659.2012.03022.x %7 2012-06-07 %D 2012 %X This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study. %J Computer Graphics Forum %V 31 %N 2 %& 431 %P 431 - 438 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 EG 2012 The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012
Klehm, O., Reshetouski, I., Eisemann, E., Seidel, H.-P., and Ihrke, I. 2012a. Interactive Geometry-aware Segmentation for the Decomposition of Kaleidoscopic Images. VMV 2012 Vision, Modeling and Visualization, Eurographics Association.
Abstract
Mirror systems have recently emerged as an alternative low-cost multi-view imaging solution. The use of these systems critically depends on the ability to compute the background of a multiply mirrored object. The images taken in such systems show a fractured, patterned view, making edge-guided segmentation difficult. Further, global illumination and light attenuation due to the mirrors make standard segmentation techniques fail. We therefore propose a system that allows a user to do the segmentation manually. We provide convenient tools that enable an interactive segmentation of kaleidoscopic images containing three-dimensional objects. Hereby, we explore suitable interaction and visualization schemes to guide the user. To achieve interactivity, we employ the GPU in all stages of the application, such as 2D/3D rendering as well as segmentation.
Export
BibTeX
@inproceedings{KST_VMV_Klehm2012,
  title     = {Interactive Geometry-aware Segmentation for the Decomposition of Kaleidoscopic Images},
  author    = {Klehm, Oliver and Reshetouski, Ilya and Eisemann, Elmar and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  isbn      = {978-3-905673-95-1},
  doi       = {10.2312/PE/VMV/VMV12/009-014},
  localid   = {Local-ID: 728254C7FB47D385C1257AAA003C9878-KST_VMV_Klehm2012},
  publisher = {Eurographics Association},
  year      = {2012},
  date      = {2012},
  abstract  = {Mirror systems have recently emerged as an alternative low-cost multi-view imaging solution. The use of these systems critically depends on the ability to compute the background of a multiply mirrored object. The images taken in such systems show a fractured, patterned view, making edge-guided segmentation difficult. Further, global illumination and light attenuation due to the mirrors make standard segmentation techniques fail. We therefore propose a system that allows a user to do the segmentation manually. We provide convenient tools that enable an interactive segmentation of kaleidoscopic images containing three-dimensional objects. Hereby, we explore suitable interaction and visualization schemes to guide the user. To achieve interactivity, we employ the GPU in all stages of the application, such as 2D/3D rendering as well as segmentation.},
  booktitle = {VMV 2012 Vision, Modeling and Visualization},
  editor    = {G{\"o}sele, Michael and Grosch, Thorsten and Preim, Bernhard and Theisel, Holger and T{\"o}nnies, Klaus-Dietz},
  pages     = {9--14},
  address   = {Magdeburg, Germany},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Reshetouski, Ilya %A Eisemanm, Elmar %A Seidel, Hans-Peter %A Ihrke, Ivo %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Geometry-aware Segmentation for the Decomposition of Kaleidoscopic Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13A6-2 %R 10.2312/PE/VMV/VMV12/009-014 %F OTHER: Local-ID: 728254C7FB47D385C1257AAA003C9878-KST_VMV_Klehm2012 %D 2012 %B 17th Annual Workshop on Vision, Modeling and Visualization %Z date of event: 2012-11-12 - 2012-11-14 %C Magdeburg, Germany %X Mirror systems have recently emerged as an alternative low-cost multi-view imaging solution. The use of these systems critically depends on the ability to compute the background of a multiply mirrored object. The images taken in such systems show a fractured, patterned view, making edge-guided segmentation difficult. Further, global illumination and light attenuation due to the mirrors make standard segmentation techniques fail. We therefore propose a system that allows a user to do the segmentation manually. We provide convenient tools that enable an interactive segmentation of kaleidoscopic images containing three-dimensional objects. Hereby, we explore suitable interaction and visualization schemes to guide the user. To achieve interactivity, we employ the GPU in all stages of the application, such as 2D/3D rendering as well as segmentation. %B VMV 2012 Vision, Modeling and Visualization %E G&#246;sele, Michael; Grosch, Thorsten; Preim, Bernhard; Theisel, Holger; T&#246;nnies, Klaus-Dietz %P 9 - 14 %I Eurographics Association %@ 978-3-905673-95-1
Klehm, O., Ritschel, T., Eisemann, E., and Seidel, H.-P. 2012b. Screen-space Bent Cones: A Practical Approach. In: GPU Pro 3. CRC Press, New York, NY.
Abstract
Ambient occlusion (AO) is a popular technique for visually improving both real-time as well as offline rendering. It decouples occlusion and shading providing a gain in efficiency. This results in an average occlusion that modulates the surface shading. However, this also reduces realism due to the lack of directional information. Bent normals were proposed as an amelioration that addresses this issue for offline rendering. Here, we describe how to compute bent normals as a cheap by-product of screen-space ambient occlusion (SSAO). Bent cones extend bent normals to further improve realism. These extensions combine the speed and simplicity of AO with physically more plausible lighting.
Export
BibTeX
@incollection{SSBC_GP3_Klehm2012,
  title     = {Screen-space Bent Cones: A Practical Approach},
  author    = {Klehm, Oliver and Ritschel, Tobias and Eisemann, Elmar and Seidel, Hans-Peter},
  booktitle = {GPU Pro 3},
  editor    = {Engel, Wolfgang},
  pages     = {191--207},
  publisher = {CRC Press},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  isbn      = {9781439887820},
  language  = {eng},
  localid   = {Local-ID: 6A3293309F071AB4C12579E900378A1E-SSBC_GP3_Klehm2012},
  abstract  = {Ambient occlusion (AO) is a popular technique for visually improving both real-time as well as offline rendering. It decouples occlusion and shading providing a gain in efficiency. This results in an average occlusion that modulates the surface shading. However, this also reduces realism due to the lack of directional information. Bent normals were proposed as an amelioration that addresses this issue for offline rendering. Here, we describe how to compute bent normals as a cheap by-product of screen-space ambient occlusion (SSAO). Bent cones extend bent normals to further improve realism. These extensions combine the speed and simplicity of AO with physically more plausible lighting.},
}
Endnote
%0 Book Section %A Klehm, Oliver %A Ritschel, Tobias %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Screen-space Bent Cones: A Practical Approach : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1476-6 %F OTHER: Local-ID: 6A3293309F071AB4C12579E900378A1E-SSBC_GP3_Klehm2012 %D 2012 %X Ambient occlusion (AO) is a popular technique for visually improving both real-time as well as offline rendering. It decouples occlusion and shading providing a gain in efficiency. This results in an average occlusion that modulates the surface shading. However, this also reduces realism due to the lack of directional information. Bent normals were proposed as an amelioration that addresses this issue for offline rendering. Here, we describe how to compute bent normals as a cheap by-product of screen-space ambient occlusion (SSAO). Bent cones extend bent normals to further improve realism. These extensions combine the speed and simplicity of AO with physically more plausible lighting. %B GPU Pro 3 %E Engel, Wolfgang %P 191 - 207 %I CRC Press %C New York, NY %@ 9781439887820
Kerber, J., Wang, M., Chang, J., Zhang, J.J., Belyaev, A., and Seidel, H.-P. 2012a. Computer Assisted Relief Generation - A Survey. Computer Graphics Forum31, 8.
Abstract
In this paper we present an overview of the achievements accomplished to date in the field of computer aided relief generation. We delineate the problem, classify different solutions, analyze similarities, investigate the development and review the approaches according to their particular relative strengths and weaknesses. Moreover, we describe remaining challenges and point out prospective extensions. In consequence this survey is likewise addressed to researchers and artists through providing valuable insights into the theory behind the different concepts in this field and augmenting the options available among the methods presented with regard to practical application.
Export
BibTeX
@article{Kerber2012_2,
  title     = {Computer Assisted Relief Generation -- A Survey},
  author    = {Kerber, Jens and Wang, Meili and Chang, Jian and Zhang, Jian J. and Belyaev, Alexander and Seidel, Hans-Peter},
  journal   = {Computer Graphics Forum},
  volume    = {31},
  number    = {8},
  pages     = {2363--2377},
  year      = {2012},
  date      = {2012},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03185.x},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  language  = {eng},
  localid   = {Local-ID: 3A79D0CC0263F875C1257ADA00444D8D-Kerber2012_2},
  abstract  = {In this paper we present an overview of the achievements accomplished to date in the field of computer aided relief generation. We delineate the problem, classify different solutions, analyze similarities, investigate the development and review the approaches according to their particular relative strengths and weaknesses. Moreover, we describe remaining challenges and point out prospective extensions. In consequence this survey is likewise addressed to researchers and artists through providing valuable insights into the theory behind the different concepts in this field and augmenting the options available among the methods presented with regard to practical application.},
}
Endnote
%0 Journal Article %A Kerber, Jens %A Wang, Meili %A Chang, Jian %A Zhang, Jian J. %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Computer Assisted Relief Generation - A Survey : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-15E7-2 %R 10.1111/j.1467-8659.2012.03185.x %F OTHER: Local-ID: 3A79D0CC0263F875C1257ADA00444D8D-Kerber2012_2 %7 2012-08-14 %D 2012 %X In this paper we present an overview of the achievements accomplished to date in the field of computer aided relief generation. We delineate the problem, classify different solutions, analyze similarities, investigate the development and review the approaches according to their particular relative strengths and weaknesses. Moreover, we describe remaining challenges and point out prospective extensions. In consequence this survey is likewise addressed to researchers and artists through providing valuable insights into the theory behind the different concepts in this field and augmenting the options available among the methods presented with regard to practical application. %J Computer Graphics Forum %V 31 %N 8 %& 2363 %P 2363 - 2377 %I Wiley-Blackwell %C Oxford, UK %@ false
Kerber, J., Wand, M., Bokeloh, M., and Seidel, H.-P. 2012b. Symmetry Detection in Large Scale City Scans. Research Report MPI-I-2012-4-001.
Abstract
In this report we present a novel method for detecting partial symmetries in very large point clouds of 3D city scans. Unlike previous work, which was limited to data sets of a few hundred megabytes maximum, our method scales to very large scenes. We map the detection problem to a nearest-neighbor search in a low-dimensional feature space, followed by a cascade of tests for geometric clustering of potential matches. Our algorithm robustly handles noisy real-world scanner data, obtaining a recognition performance comparable to state-of-the-art methods. In practice, it scales linearly with the scene size and achieves a high absolute throughput, processing half a terabyte of raw scanner data over night on a dual socket commodity PC.
Export
BibTeX
@techreport{KerberBokelohWandSeidel2012,
  title         = {Symmetry Detection in Large Scale City Scans},
  author        = {Kerber, Jens and Wand, Michael and Bokeloh, Martin and Seidel, Hans-Peter},
  institution   = {Max-Planck-Institut f{\"u}r Informatik},
  type          = {Research Report},
  number        = {MPI-I-2012-4-001},
  year          = {2012},
  issn          = {0946-011X},
  language      = {eng},
  abstract      = {In this report we present a novel method for detecting partial symmetries in very large point clouds of 3D city scans. Unlike previous work, which was limited to data sets of a few hundred megabytes maximum, our method scales to very large scenes. We map the detection problem to a nearest-neighbor search in a low-dimensional feature space, followed by a cascade of tests for geometric clustering of potential matches. Our algorithm robustly handles noisy real-world scanner data, obtaining a recognition performance comparable to state-of-the-art methods. In practice, it scales linearly with the scene size and achieves a high absolute throughput, processing half a terabyte of raw scanner data over night on a dual socket commodity PC.},
  internal-note = {institution added: required field for @techreport, derived from the MPI-I report series number; abstract typo "nearestneighbor" repaired -- TODO confirm institution spelling against the report cover page},
}
Endnote
%0 Report %A Kerber, Jens %A Wand, Michael %A Bokeloh, Martin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Symmetry Detection in Large Scale City Scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-0427-4 %D 2012 %P 32 p. %X In this report we present a novel method for detecting partial symmetries in very large point clouds of 3D city scans. Unlike previous work, which was limited to data sets of a few hundred megabytes maximum, our method scales to very large scenes. We map the detection problem to a nearestneighbor search in a low-dimensional feature space, followed by a cascade of tests for geometric clustering of potential matches. Our algorithm robustly handles noisy real-world scanner data, obtaining a recognition performance comparable to state-of-the-art methods. In practice, it scales linearly with the scene size and achieves a high absolute throughput, processing half a terabyte of raw scanner data over night on a dual socket commodity PC. %B Research Report %@ false
Kalojanov, J., Bokeloh, M., Wand, M., Guibas, L., Seidel, H.-P., and Slusallek, P. 2012. Microtiles: Extracting Building Blocks from Correspondences. Computer Graphics Forum (Proc. SGP 2012)31, 5.
Export
BibTeX
@article{Kalojanov2012,
  title         = {Microtiles: Extracting Building Blocks from Correspondences},
  author        = {Kalojanov, Javor and Bokeloh, Martin and Wand, Michael and Guibas, Leonidas and Seidel, Hans-Peter and Slusallek, Philipp},
  journal       = {Computer Graphics Forum (Proc. SGP)},
  volume        = {31},
  number        = {5},
  pages         = {1597--1606},
  year          = {2012},
  date          = {2012},
  issn          = {0167-7055},
  doi           = {10.1111/j.1467-8659.2012.03165.x},
  publisher     = {Wiley-Blackwell},
  address       = {Oxford, UK},
  language      = {eng},
  localid       = {Local-ID: 62EBB7ABBD784112C1257AED003C5EE4-Kalojanov2012},
  booktitle     = {Eurographics Symposium on Geometry Processing 2012 (SGP 2012)},
  editor        = {Quak, Ewald},
  internal-note = {issn added for consistency with the other Computer Graphics Forum entries in this file (print ISSN 0167-7055)},
}
Endnote
%0 Journal Article %A Kalojanov, Javor %A Bokeloh, Martin %A Wand, Michael %A Guibas, Leonidas %A Seidel, Hans-Peter %A Slusallek, Philipp %+ International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Universit&#228;t des Saarlandes %T Microtiles: Extracting Building Blocks from Correspondences : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0D54-5 %F OTHER: Local-ID: 62EBB7ABBD784112C1257AED003C5EE4-Kalojanov2012 %R 10.1111/j.1467-8659.2012.03165.x %D 2012 %J Computer Graphics Forum %V 31 %N 5 %& 1597 %P 1597 - 1606 %I Wiley-Blackwell %C Oxford, UK %B Eurographics Symposium on Geometry Processing 2012 %O SGP 2012 Tallinn, Estonia, July 16 &#8211; 18, 2012 Symposium on Geometry Processing 2012
Jain, A., Thormählen, T., Ritschel, T., and Seidel, H.-P. 2012a. Exploring Shape Variations by 3D-Model Decomposition and Part-based Recombination. Computer Graphics Forum (Proc. EUROGRAPHICS 2012)31, 2.
Export
BibTeX
@article{JainEG2012,
  title     = {Exploring Shape Variations by {3D}-Model Decomposition and Part-based Recombination},
  author    = {Jain, Arjun and Thorm{\"a}hlen, Thorsten and Ritschel, Tobias and Seidel, Hans-Peter},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {631--640},
  year      = {2012},
  date      = {2012},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03042.x},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  language  = {eng},
  localid   = {Local-ID: 41BC7691719A8E13C1257B0300430734-JainEG2012},
  booktitle = {EUROGRAPHICS 2012},
  editor    = {Cignoni, Paolo and Ertl, Thomas},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Thorm&#228;hlen, Thorsten %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Exploring Shape Variations by 3D-Model Decomposition and Part-based Recombination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F349-1 %R 10.1111/j.1467-8659.2012.03042.x %F OTHER: Local-ID: 41BC7691719A8E13C1257B0300430734-JainEG2012 %7 2012 %D 2012 %J Computer Graphics Forum %V 31 %N 2 %& 631 %P 631 - 640 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 The European Association for Computer Graphics 33rd Annual Conference ; Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EG 2012
Jain, A., Thormählen, T., Ritschel, T., and Seidel, H.-P. 2012b. Material Memex: Automatic Material Suggestions for 3D Objects. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012)31, 6.
Export
BibTeX
@article{JainSA2012,
  title     = {Material Memex: {Automatic} Material Suggestions for {3D} Objects},
  author    = {Jain, Arjun and Thorm{\"a}hlen, Thorsten and Ritschel, Tobias and Seidel, Hans-Peter},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {31},
  number    = {6},
  pages     = {1--8},
  eid       = {143},
  year      = {2012},
  date      = {2012},
  issn      = {0730-0301},
  doi       = {10.1145/2366145.2366162},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  language  = {eng},
  localid   = {Local-ID: AE59BF88F44A94C0C1257B030042253E-JainSA2012},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Thorm&#228;hlen, Thorsten %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Material Memex: Automatic Material Suggestions for 3D Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F34D-A %R 10.1145/2366145.2366162 %F OTHER: Local-ID: AE59BF88F44A94C0C1257B030042253E-JainSA2012 %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 1 %P 1 - 8 %Z sequence number: 143 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December
Ihrke, I., Reshetouski, I., Manakov, A., Tevs, A., Wand, M., and Seidel, H.-P. 2012a. A Kaleidoscopic Approach to Surround Geometry and Reflectance Acquisition. 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2012), IEEE.
Export
BibTeX
@inproceedings{Ihrke2012a,
  title     = {A Kaleidoscopic Approach to Surround Geometry and Reflectance Acquisition},
  author    = {Ihrke, Ivo and Reshetouski, Ilya and Manakov, Alkhazur and Tevs, Art and Wand, Michael and Seidel, Hans-Peter},
  booktitle = {2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2012)},
  pages     = {29--36},
  publisher = {IEEE},
  year      = {2012},
  date      = {2012-06},
  isbn      = {978-1-4673-1611-8},
  doi       = {10.1109/CVPRW.2012.6239347},
  language  = {eng},
  localid   = {Local-ID: 77354E08AB6311D2C1257AD7004BEBEC-Ihrke2012a},
  address   = {Providence, RI, USA},
}
Endnote
%0 Conference Proceedings %A Ihrke, Ivo %A Reshetouski, Ilya %A Manakov, Alkhazur %A Tevs, Art %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Kaleidoscopic Approach to Surround Geometry and Reflectance Acquisition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-124D-5 %R 10.1109/CVPRW.2012.6239347 %F OTHER: Local-ID: 77354E08AB6311D2C1257AD7004BEBEC-Ihrke2012a %D 2012 %B 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops %Z date of event: 2012-06-16 - 2012-06-21 %C Providence, RI, USA %B 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops %P 29 - 36 %I IEEE %@ 978-1-4673-1611-8
Ihrke, I., Reshetouski, I., Manakov, A., and Seidel, H.-P. 2012b. Three-Dimensional Kaleidoscopic Imaging. Computational Optical Sensing and Imaging (COSI 2012), OSA.
Export
BibTeX
@inproceedings{Ihrke2012,
  title     = {Three-Dimensional Kaleidoscopic Imaging},
  author    = {Ihrke, Ivo and Reshetouski, Ilya and Manakov, Alkhazur and Seidel, Hans-Peter},
  booktitle = {Computational Optical Sensing and Imaging (COSI 2012)},
  pages     = {1--3},
  publisher = {OSA},
  year      = {2012},
  date      = {2012},
  isbn      = {1-55752-947-7},
  doi       = {10.1364/COSI.2012.CTu4B.8},
  language  = {eng},
  localid   = {Local-ID: D95527BADC2F41E4C1257AD7004B7E2A-Ihrke2012},
  address   = {Monterey, CA},
}
Endnote
%0 Conference Proceedings %A Ihrke, Ivo %A Reshetouski, Ilya %A Manakov, Alkhazur %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Three-Dimensional Kaleidoscopic Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-12EE-A %F OTHER: Local-ID: D95527BADC2F41E4C1257AD7004B7E2A-Ihrke2012 %R 10.1364/COSI.2012.CTu4B.8 %D 2012 %B Computational Optical Sensing and Imaging Meeting %Z date of event: 2012-06-24 - 2012-06-28 %C Monterey, CA %B Computational Optical Sensing and Imaging %P 1 - 3 %I OSA %@ 1-55752-947-7
Herzog, R., Cadík, M., Aydin, T.O., Kim, K.I., Myszkowski, K., and Seidel, H.-P. 2012. NoRM: No-reference Image Quality Metric for Realistic Image Synthesis. Computer Graphics Forum (Proc. EUROGRAPHICS 2012)31, 2.
Abstract
Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts.
Export
BibTeX
@article{NoRM_EG2012,
  title         = {{NoRM}: {No-reference} Image Quality Metric for Realistic Image Synthesis},
  author        = {Herzog, Robert and Cad{\'i}k, Martin and Aydin, Tunc Ozan and Kim, Kwang In and Myszkowski, Karol and Seidel, Hans-Peter},
  journal       = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume        = {31},
  number        = {2},
  pages         = {545--554},
  year          = {2012},
  date          = {2012},
  issn          = {0167-7055},
  doi           = {10.1111/j.1467-8659.2012.03055.x},
  publisher     = {Blackwell-Wiley},
  address       = {Oxford},
  language      = {eng},
  localid       = {Local-ID: 673028A8C798FD45C1257A47004B2978-NoRM_EG2012},
  abstract      = {Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts.},
  booktitle     = {EUROGRAPHICS 2012},
  editor        = {Cignoni, Paolo and Ertl, Thomas},
  internal-note = {repaired: journal value was split across two physical lines, and the abstract contained literal HTML <br> tags from page extraction},
}
Endnote
%0 Journal Article %A Herzog, Robert %A Cad&#237;k, Martin %A Aydin, Tunc Ozan %A Kim, Kwang In %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T NoRM: No-reference Image Quality Metric for Realistic Image Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1586-9 %R 10.1111/j.1467-8659.2012.03055.x %F OTHER: Local-ID: 673028A8C798FD45C1257A47004B2978-NoRM_EG2012 %7 2012-06-14 %D 2012 %X Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts. %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 31 %N 2 %& 545 %P 545 - 554 %I Blackwell-Wiley %C Oxford %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EG 2012
Günther, D., Seidel, H.-P., and Weinkauf, T. 2012. Extraction of Dominant Extremal Structures in Volumetric Data Using Separatrix Persistence. Computer Graphics Forum31, 8.
Abstract
Extremal lines and surfaces are features of a 3D scalar field where the scalar function becomes minimal or maximal with respect to a local neighborhood. These features are important in many applications, e.g., computer tomography, fluid dynamics, cell biology. We present a novel topological method to extract these features using discrete Morse theory. In particular, we extend the notion of Separatrix Persistence from 2D to 3D, which gives us a robust estimation of the feature strength for extremal lines and surfaces. Not only does it allow us to determine the most important (parts of) extremal lines and surfaces, it also serves as a robust filtering measure of noise-induced structures. Our purely combinatorial method does not require derivatives or any other numerical computations.
Export
BibTeX
@article{guenther12b,
  title         = {Extraction of Dominant Extremal Structures in Volumetric Data Using Separatrix Persistence},
  author        = {G{\"u}nther, David and Seidel, Hans-Peter and Weinkauf, Tino},
  journal       = {Computer Graphics Forum},
  volume        = {31},
  number        = {8},
  pages         = {2554--2566},
  year          = {2012},
  date          = {2012},
  issn          = {0167-7055},
  doi           = {10.1111/j.1467-8659.2012.03222.x},
  publisher     = {Wiley-Blackwell},
  address       = {Oxford, UK},
  language      = {eng},
  localid       = {Local-ID: 8F6C93DF947E3889C1257AD800396653-guenther12b},
  abstract      = {Extremal lines and surfaces are features of a 3D scalar field where the scalar function becomes minimal or maximal with respect to a local neighborhood. These features are important in many applications, e.g., computer tomography, fluid dynamics, cell biology. We present a novel topological method to extract these features using discrete Morse theory. In particular, we extend the notion of Separatrix Persistence from 2D to 3D, which gives us a robust estimation of the feature strength for extremal lines and surfaces. Not only does it allow us to determine the most important (parts of) extremal lines and surfaces, it also serves as a robust filtering measure of noise-induced structures. Our purely combinatorial method does not require derivatives or any other numerical computations.},
  internal-note = {issn normalized from the online ISSN 1467-8659 to the print ISSN 0167-7055 used by every other Computer Graphics Forum entry in this file},
}
Endnote
%0 Journal Article %A G&#252;nther, David %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Extraction of Dominant Extremal Structures in Volumetric Data Using Separatrix Persistence : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14F7-6 %F OTHER: Local-ID: 8F6C93DF947E3889C1257AD800396653-guenther12b %R 10.1111/j.1467-8659.2012.03222.x %7 2012-10-08 %D 2012 %X Extremal lines and surfaces are features of a 3D scalar field where the scalar function becomes minimal or maximal with respect to a local neighborhood. These features are important in many applications, e.g., computer tomography, fluid dynamics, cell biology. We present a novel topological method to extract these features using discrete Morse theory. In particular, we extend the notion of Separatrix Persistence from 2D to 3D, which gives us a robust estimation of the feature strength for extremal lines and surfaces. Not only does it allow us to determine the most important (parts of) extremal lines and surfaces, it also serves as a robust filtering measure of noise-induced structures. Our purely combinatorial method does not require derivatives or any other numerical computations. %J Computer Graphics Forum %V 31 %N 8 %& 2554 %P 2554 - 2566 %I Wiley-Blackwell %C Oxford, UK %@ false
Elhayek, A., Stoll, C., Hasler, N., Kim, K.I., Seidel, H.-P., and Theobalt, C. 2012a. Spatio-temporal Motion Tracking with Unsynchronized Cameras. 2012 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2012), IEEE.
Abstract
We present a new spatio-temporal method for markerless motion capture. We reconstruct the pose and motion of a character from a multi-view video sequence without requiring the cameras to be synchronized and without aligning captured frames in time. By formulating the model-to-image similarity measure as a temporally continuous functional, we are also able to reconstruct motion in much higher temporal detail than was possible with previous synchronized approaches. By purposefully running cameras unsynchronized we can capture even very fast motion at speeds that off-the-shelf but high quality cameras provide.
Export
BibTeX
@inproceedings{ElHayek2012a,
  title     = {Spatio-temporal Motion Tracking with Unsynchronized Cameras},
  author    = {Elhayek, Ahmed and Stoll, Carsten and Hasler, Nils and Kim, Kwang In and Seidel, Hans-Peter and Theobalt, Christian},
  booktitle = {2012 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2012)},
  pages     = {1870--1877},
  publisher = {IEEE},
  year      = {2012},
  date      = {2012},
  issn      = {1063-6919},
  isbn      = {978-1-4673-1226-4},
  doi       = {10.1109/CVPR.2012.6247886},
  language  = {eng},
  localid   = {Local-ID: BAE13C070CC977C1C1257AD7003977F5-ElHayek2012a},
  abstract  = {We present a new spatio-temporal method for markerless motion capture. We reconstruct the pose and motion of a character from a multi-view video sequence without requiring the cameras to be synchronized and without aligning captured frames in time. By formulating the model-to-image similarity measure as a temporally continuous functional, we are also able to reconstruct motion in much higher temporal detail than was possible with previous synchronized approaches. By purposefully running cameras unsynchronized we can capture even very fast motion at speeds that off-the-shelf but high quality cameras provide.},
  address   = {Providence, RI},
}
Endnote
%0 Conference Proceedings %A Elhayek, Ahmed %A Stoll, Carsten %A Hasler, Nils %A Kim, Kwang In %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-temporal Motion Tracking with Unsynchronized Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-138D-B %F OTHER: Local-ID: BAE13C070CC977C1C1257AD7003977F5-ElHayek2012a %R 10.1109/CVPR.2012.6247886 %D 2012 %B 2012 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2012-06-16 - 2012-06-21 %C Providence, RI %X We present a new spatio-temporal method for markerless motion capture. We reconstruct the pose and motion of a character from a multi-view video sequence without requiring the cameras to be synchronized and without aligning captured frames in time. By formulating the model-to-image similarity measure as a temporally continuous functional, we are also able to reconstruct motion in much higher temporal detail than was possible with previous synchronized approaches. By purposefully running cameras unsynchronized we can capture even very fast motion at speeds that off-the-shelf but high quality cameras provide. %B 2012 IEEE Conference on Computer Vision and Pattern Recognition %P 1870 - 1877 %I IEEE %@ false
Elhayek, A., Stoll, C., Kim, K.I., Seidel, H.-P., and Theobalt, C. 2012b. Feature-based Multi-video Synchronization with Subframe Accuracy. Pattern Recognition (DAGM 2012/OAGM 2012), Springer.
Export
BibTeX
@inproceedings{Elhayek2012,
  title     = {Feature-based Multi-video Synchronization with Subframe Accuracy},
  author    = {Elhayek, Ahmed and Stoll, Carsten and Kim, Kwang In and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-3-642-32716-2},
  doi       = {10.1007/978-3-642-32717-9_27},
  localid   = {Local-ID: CA9960222B05A1B7C1257AD70076C360-Elhayek2012},
  publisher = {Springer},
  year      = {2012},
  date      = {2012},
  booktitle = {Pattern Recognition (DAGM 2012/OAGM 2012)},
  editor    = {Pinz, Axel and Pock, Thomas and Bischof, Horst and Leberl, Franz},
  pages     = {266--275},
  series    = {Lecture Notes in Computer Science},
  volume    = {7476},
  address   = {Graz, Austria},
}
Endnote
%0 Conference Proceedings %A Elhayek, Ahmed %A Stoll, Carsten %A Kim, Kwang In %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature-based Multi-video Synchronization with Subframe Accuracy : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1212-7 %R 10.1007/978-3-642-32717-9_27 %F OTHER: Local-ID: CA9960222B05A1B7C1257AD70076C360-Elhayek2012 %D 2012 %B 34th Symposium of the German Association for Pattern Recognition ; 36th Annual Austrian Association for Pattern Recognition Conference %Z date of event: 2012-08-28 - 2012-08-31 %C Graz, Austria %B Pattern Recognition %E Pinz, Axel; Pock, Thomas; Bischof, Horst; Leberl, Franz %P 266 - 275 %I Springer %@ 978-3-642-32716-2 %B Lecture Notes in Computer Science %N 7476 %U https://rdcu.be/dJK7K
Elek, O., Ritschel, T., Wilkie, A., and Seidel, H.-P. 2012a. Interactive Cloud Rendering Using Temporally Coherent Photon Mapping. Computers & Graphics 36, 8.
Abstract
This work presents a novel interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds -- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is initially used, and is then upsampled to the density field resolution on a physical basis in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only above the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.
Export
BibTeX
% note(review): stripped literal <br> HTML tags (web-export artifacts) from the
% abstract field; they would render verbatim in any bibliography style.
@article{Elek2012b,
  title     = {Interactive Cloud Rendering Using Temporally Coherent Photon Mapping},
  author    = {Elek, Oskar and Ritschel, Tobias and Wilkie, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0097-8493},
  doi       = {10.1016/j.cag.2012.10.002},
  publisher = {Elsevier},
  address   = {Amsterdam},
  year      = {2012},
  date      = {2012},
  abstract  = {This work presents a novel interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds -- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is initially used, and is then upsampled to the density field resolution on a physical basis in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only above the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.},
  journal   = {Computers \& Graphics},
  volume    = {36},
  number    = {8},
  pages     = {1109--1118},
}
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Wilkie, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Cloud Rendering Using Temporally Coherent Photon Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F427-5 %R 10.1016/j.cag.2012.10.002 %7 2012-01-17 %D 2012 %X This work presents a novel interactive algorithm for simulation of light <br>transport in clouds. Exploiting the high temporal coherence of the typical <br>illumination and morphology of clouds we build on volumetric photon mapping, <br>which we modify to allow for interactive rendering speeds -- instead of <br>building a fresh irregular photon map for every scene state change we <br>accumulate photon contributions in a regular grid structure. This is then <br>continuously being refreshed by re-shooting only a fraction of the total amount <br>of photons in each frame. To maintain its temporal coherence and low variance, <br>a low-resolution grid is initially used, and is then upsampled to the density <br>field resolution on a physical basis in each frame. We also present a technique <br>to store and reconstruct the angular illumination information by exploiting <br>properties of the standard Henyey-Greenstein function, namely its ability to <br>express anisotropic angular distributions with a single dominating direction.<br>The presented method is physically-plausible, conceptually simple and <br>comparatively easy to implement. Moreover, it operates only above the cloud <br>density field, thus not requiring any precomputation, and handles all light <br>sources typical for the given environment, i.e. where one of the light sources <br>dominates. %J Computers & Graphics %V 36 %N 8 %& 1109 %P 1109 - 1118 %I Elsevier %C Amsterdam %@ false
Elek, O., Ritschel, T., Wilkie, A., and Seidel, H.-P. 2012b. Interactive Cloud Rendering Using Temporally-coherent Photon Mapping. Graphics Interface 2012 (GI 2012), Canadian Information Processing Society.
Abstract
This paper presents an interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds --- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is used, and is then upsampled to the density field resolution in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein phase function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only on the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.
Export
BibTeX
@inproceedings{Elek2012a,
  title     = {Interactive Cloud Rendering Using Temporally-coherent Photon Mapping},
  author    = {Elek, Oskar and Ritschel, Tobias and Wilkie, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0713-5424},
  isbn      = {978-1-4503-1420-6},
  localid   = {Local-ID: 3CB9A3047BF33EE2C1257AF000674820-Elek2012a},
  publisher = {Canadian Information Processing Society},
  year      = {2012},
  date      = {2012-05},
  abstract  = {This paper presents an interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds --- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is used, and is then upsampled to the density field resolution in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein phase function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only on the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.},
  booktitle = {Graphics Interface 2012 (GI 2012)},
  editor    = {Brooks, Stephen and Hawkey, Kirstie},
  pages     = {141--148},
  address   = {Toronto, ON, Canada},
}
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Ritschel, Tobias %A Wilkie, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Cloud Rendering Using Temporally-coherent Photon Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F42E-8 %F OTHER: Local-ID: 3CB9A3047BF33EE2C1257AF000674820-Elek2012a %D 2012 %B Graphic Interface 2012 %Z date of event: 2012-05-28 - 2012-05-30 %C Toronto, ON, Canada %X This paper presents an interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds --- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is used, and is then upsampled to the density field resolution in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein phase function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only on the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates. 
%B Graphics Interface 2012 %E Brooks, Stephen; Hawkey, Kirstie %P 141 - 148 %I Canadian Information Processing Society %@ false
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2012a. Apparent Stereo: The Cornsweet Illusion Can Enhance Perceived Depth. Human Vision and Electronic Imaging XVII (HVEI 2012), SPIE/IS&T.
Export
BibTeX
@inproceedings{Didyk2012Cornsweet,
  title     = {Apparent Stereo: The {Cornsweet} Illusion Can Enhance Perceived Depth},
  author    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0277-786X},
  isbn      = {9780819489388},
  doi       = {10.1117/12.907612},
  localid   = {Local-ID: B0D8F2F7DF789CF4C1257A710043B8CF-Didyk2012Cornsweet},
  publisher = {SPIE/IS\&T},
  year      = {2012},
  date      = {2012},
  booktitle = {Human Vision and Electronic Imaging XVII (HVEI 2012)},
  editor    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib},
  pages     = {1--12},
  series    = {Proceedings of SPIE},
  volume    = {8291},
  address   = {Burlingame, CA, USA},
}
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Stereo: The Cornsweet Illusion Can Enhance Perceived Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13C8-5 %R 10.1117/12.907612 %F OTHER: Local-ID: B0D8F2F7DF789CF4C1257A710043B8CF-Didyk2012Cornsweet %D 2012 %B Human Vision and Electronic Imaging XVII %Z date of event: 2012-01-23 - 2012-01-26 %C Burlingame, CA, USA %B Human Vision and Electronic Imaging XVII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %P 1 - 12 %I SPIE/IS&T %@ 9780819489388 %B Proceedings of SPIE %N 8291 %@ false
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., Seidel, H.-P., and Matusik, W. 2012b. A Luminance-contrast-aware Disparity Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Export
BibTeX
@article{Didyk2012SigAsia,
  title     = {A Luminance-contrast-aware Disparity Model and Applications},
  author    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter and Matusik, Wojciech},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2366145.2366203},
  localid   = {Local-ID: C754E5AADEF5EA2AC1257AFE0056029B-Didyk2012SigAsia},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {31},
  number    = {6},
  pages     = {184:1--184:10},
  eid       = {184},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Luminance-contrast-aware Disparity Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F3C4-9 %R 10.1145/2366145.2366203 %F OTHER: Local-ID: C754E5AADEF5EA2AC1257AFE0056029B-Didyk2012SigAsia %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 184:1 %P 184:1 - 184:10 %Z sequence number: 184 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O Singapore, 28 November - 1 December ACM SIGGRAPH Asia 2012
Čadík, M., Herzog, R., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2012. New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Export
BibTeX
@article{cadik12iqm_evaluation,
  title     = {New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts},
  author    = {{\v C}ad{\'i}k, Martin and Herzog, Robert and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2366145.2366166},
  localid   = {Local-ID: 1D6D7862B7800D8DC1257AD7003415AE-cadik12iqm_evaluation},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {31},
  number    = {6},
  pages     = {1--10},
  eid       = {147},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A &#268;ad&#237;k, Martin %A Herzog, Robert %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-166E-6 %R 10.1145/2366145.2366166 %F OTHER: Local-ID: 1D6D7862B7800D8DC1257AD7003415AE-cadik12iqm_evaluation %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 1 %P 1 - 10 %Z sequence number: 147 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December 2012
Bokeloh, M., Wand, M., Seidel, H.-P., and Koltun, V. 2012. An Algebraic Model for Parameterized Shape Editing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2012) 31, 4.
Export
BibTeX
@article{Bokeloh2012algMod,
  title     = {An Algebraic Model for Parameterized Shape Editing},
  author    = {Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter and Koltun, Vladlen},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2185520.2185574},
  localid   = {Local-ID: A1326DBCE39F6AA4C1257AED003C12F0-Bokeloh2012algMod},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {31},
  number    = {4},
  pages     = {1--10},
  eid       = {78},
  booktitle = {Proceedings of ACM SIGGRAPH 2012},
}
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %A Koltun, Vladlen %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T An Algebraic Model for Parameterized Shape Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14E9-6 %F OTHER: Local-ID: A1326DBCE39F6AA4C1257AED003C12F0-Bokeloh2012algMod %R 10.1145/2185520.2185574 %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 4 %& 1 %P 1 - 10 %Z sequence number: 78 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2012 %O Los Angeles, California ACM SIGGRAPH 2012
Bharaj, G., Thormählen, T., Seidel, H.-P., and Theobalt, C. 2012. Automatically Rigging Multi-component Characters. Computer Graphics Forum (Proc. EUROGRAPHICS 2012) 31, 2.
Export
BibTeX
@article{GAURAV2012,
  title     = {Automatically Rigging Multi-component Characters},
  author    = {Bharaj, Gaurav and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03034.x},
  localid   = {Local-ID: 7FDE39168BE34083C1257AE4005C8329-GAURAV2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {755--764},
  booktitle = {EUROGRAPHICS 2012},
  editor    = {Cignoni, Paolo and Ertl, Thomas},
}
Endnote
%0 Journal Article %A Bharaj, Gaurav %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatically Rigging Multi-component Characters : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14E2-3 %F OTHER: Local-ID: 7FDE39168BE34083C1257AE4005C8329-GAURAV2012 %R 10.1111/j.1467-8659.2012.03034.x %7 2012-06-07 %D 2012 %J Computer Graphics Forum %V 31 %N 2 %& 755 %P 755 - 764 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O The European Association for Computer Graphics 33rd Annual Conference ; Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EUROGRAPHICS 2012 %U http://gvv.mpi-inf.mpg.de/files/old_site_files/armc_eg_2012.pdf
Baboud, L., Eisemann, E., and Seidel, H.-P. 2012. Precomputed Safety Shapes for Efficient and Accurate Height-field Rendering. IEEE Transactions on Visualization and Computer Graphics 18, 11.
Export
BibTeX
% note(review): normalized the journal field to the official title capitalization
% "IEEE Transactions on Visualization and Computer Graphics" (ISSN 1077-2626);
% the export had it all-lowercase.
@article{Baboud2012,
  title     = {Precomputed Safety Shapes for Efficient and Accurate Height-field Rendering},
  author    = {Baboud, Lionel and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2011.281},
  localid   = {Local-ID: 418C87AB7BA9A992C1257B2800347870-Baboud2012},
  publisher = {IEEE},
  address   = {Piscataway, NJ},
  year      = {2012},
  date      = {2012},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {18},
  number    = {11},
  pages     = {1811--1823},
}
Endnote
%0 Journal Article %A Baboud, Lionel %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Cluster of Excellence Multimodal Computing Cluster of Excellence Multimodal Computing Computer Graphics, MPI for Informatics, Max Planck Society %T Precomputed Safety Shapes for Efficient and Accurate Height-field Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14D0-B %F OTHER: Local-ID: 418C87AB7BA9A992C1257B2800347870-Baboud2012 %R 10.1109/TVCG.2011.281 %7 2011-12-08 %D 2012 %J IEEE transactions on visualization and computer graphics %V 18 %N 11 %& 1811 %P 1811 - 1823 %I IEEE %C Piscataway, NJ %@ false
2011
Xu, F., Liu, Y., Stoll, C., et al. 2011. Video-based Characters -- Creating New Human Performances from a Multi-view Video Database. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2011) 30, 4.
Export
BibTeX
@article{Xu2011,
  title     = {Video-based Characters -- Creating New Human Performances from a Multi-view Video Database},
  author    = {Xu, Feng and Liu, Yebin and Stoll, Carsten and Tompkin, James and Bharaj, Gaurav and Dai, Qionghai and Seidel, Hans-Peter and Kautz, Jan and Theobalt, Christian},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2010324.1964927},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2011},
  date      = {2011},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {30},
  number    = {4},
  pages     = {1--20},
  eid       = {32},
  booktitle = {Proceedings of ACM SIGGRAPH 2011},
}
Endnote
%0 Journal Article %A Xu, Feng %A Liu, Yebin %A Stoll, Carsten %A Tompkin, James %A Bharaj, Gaurav %A Dai, Qionghai %A Seidel, Hans-Peter %A Kautz, Jan %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Video-based Characters -- Creating New Human Performances from a Multi-view Video Database : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1417-3 %F EDOC: 618893 %R 10.1145/2010324.1964927 %7 2011 %D 2011 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 30 %N 4 %& 1 %P 1 - 20 %Z sequence number: 32 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2011 %O ACM SIGGRAPH 2011 Vancouver, BC, Canada
Wu, C., Varanasi, K., Liu, Y., Seidel, H.-P., and Theobalt, C. 2011. Shading-based Dynamic Shape Refinement from Multi-view Video under General Illumination. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{Wu_iccv2011,
  title     = {Shading-based Dynamic Shape Refinement from Multi-view Video under General Illumination},
  author    = {Wu, Chenglei and Varanasi, Kiran and Liu, Yebin and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126358},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {1108--1115},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Wu, Chenglei %A Varanasi, Kiran %A Liu, Yebin %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shading-based Dynamic Shape Refinement from Multi-view Video under General Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13F7-4 %F EDOC: 618885 %R 10.1109/ICCV.2011.6126358 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 1108 - 1115 %I IEEE %@ 978-1-4577-1101-5
Tevs, A., Berner, A., Wand, M., Ihrke, I., and Seidel, H.-P. 2011. Intrinsic Shape Matching by Planned Landmark Sampling. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Abstract
Recently, the problem of intrinsic shape matching has received a lot of attention. A number of algorithms have been proposed, among which random-sampling-based techniques have been particularly successful due to their generality and efficiency. We introduce a new sampling-based shape matching algorithm that uses a planning step to find optimized "landmark" points. These points are matched first in order to maximize the information gained and thus minimize the sampling costs. Our approach makes three main contributions: First, the new technique leads to a significant improvement in performance, which we demonstrate on a number of benchmark scenarios. Second, our technique does not require any keypoint detection. This is often a significant limitation for models that do not show sufficient surface features. Third, we examine the actual numerical degrees of freedom of the matching problem for a given piece of geometry. In contrast to previous results, our estimates take into account unprecise geodesics and potentially numerically unfavorable geometry of general topology, giving a more realistic complexity estimate.
Export
BibTeX
@article{TevsEG2011,
  title     = {Intrinsic Shape Matching by Planned Landmark Sampling},
  author    = {Tevs, Art and Berner, Alexander and Wand, Michael and Ihrke, Ivo and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/j.1467-8659.2011.01879.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  abstract  = {Recently, the problem of intrinsic shape matching has received a lot of attention. A number of algorithms have been proposed, among which random-sampling-based techniques have been particularly successful due to their generality and efficiency. We introduce a new sampling-based shape matching algorithm that uses a planning step to find optimized "landmark" points. These points are matched first in order to maximize the information gained and thus minimize the sampling costs. Our approach makes three main contributions: First, the new technique leads to a significant improvement in performance, which we demonstrate on a number of benchmark scenarios. Second, our technique does not require any keypoint detection. This is often a significant limitation for models that do not show sufficient surface features. Third, we examine the actual numerical degrees of freedom of the matching problem for a given piece of geometry. In contrast to previous results, our estimates take into account unprecise geodesics and potentially numerically unfavorable geometry of general topology, giving a more realistic complexity estimate.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {543--552},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Tevs, Art %A Berner, Alexander %A Wand, Michael %A Ihrke, Ivo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Intrinsic Shape Matching by Planned Landmark Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13C7-0 %F EDOC: 618855 %R 10.1111/j.1467-8659.2011.01879.x %D 2011 %* Review method: peer-reviewed %X Recently, the problem of intrinsic shape matching has received a lot of attention. A number of algorithms have been proposed, among which random-sampling-based techniques have been particularly successful due to their generality and efficiency. We introduce a new sampling-based shape matching algorithm that uses a planning step to find optimized "landmark" points. These points are matched first in order to maximize the information gained and thus minimize the sampling costs. Our approach makes three main contributions: First, the new technique leads to a significant improvement in performance, which we demonstrate on a number of benchmark scenarios. Second, our technique does not require any keypoint detection. This is often a significant limitation for models that do not show sufficient surface features. Third, we examine the actual numerical degrees of freedom of the matching problem for a given piece of geometry. In contrast to previous results, our estimates take into account unprecise geodesics and potentially numerically unfavorable geometry of general topology, giving a more realistic complexity estimate. 
%J Computer Graphics Forum %V 30 %N 2 %& 543 %P 543 - 552 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Templin, K., Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2011. Apparent Resolution Enhancement for Animations. Proceedings SCCG 2011 (SCCG 2011), ACM.
Abstract
Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. "panning". In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods.
Export
BibTeX
@inproceedings{Templin2011,
  title     = {Apparent Resolution Enhancement for Animations},
  author    = {Templin, Krzysztof and Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4503-1978-2},
  doi       = {10.1145/2461217.2461230},
  publisher = {ACM},
  year      = {2011},
  date      = {2011},
  abstract  = {Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. ``panning''. In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods.},
  booktitle = {Proceedings SCCG 2011 (SCCG 2011)},
  editor    = {Nishita, Tomoyuki and {\v D}urikovi{\v c}, Roman},
  pages     = {57--64},
  address   = {Vini{\v c}n{\'e}, Slovakia},
}
Endnote
%0 Conference Proceedings %A Templin, Krzysztof %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Resolution Enhancement for Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-138B-9 %F EDOC: 618886 %R 10.1145/2461217.2461230 %D 2011 %B 27th Spring Conference on Computer Graphics %Z date of event: 2011-04-28 - 2011-04-30 %C Vini&#269;n&#233;, Slovakia %X Presenting the variety of high resolution images captured by high-quality <br>devices, or generated on the computer, is challenging due to the limited <br>resolution of current display devices. Our recent work addressed this problem <br>by taking into account human perception. By applying a specific motion to a <br>high-resolution image shown on a low-resolution display device, human eye <br>tracking and integration could be exploited to achieve apparent resolution <br>enhancement. To this end, the high-resolution image is decomposed into a <br>sequence of temporally varying low-resolution images that are displayed at high <br>refresh rates. However, this approach is limited to a specific class of simple <br>or constant movements, i.e. ``panning''. In this work, we generalize this idea <br>to arbitrary motions, as well as to videos with arbitrary motion flow. The <br>resulting image sequences are compared to a range of other down-sampling <br>methods. %B Proceedings SCCG 2011 %E Nishita, Tomoyuki; &#270;urikovi&#269;, Roman %P 57 - 64 %I ACM %@ 978-1-4503-1978-2
Tautges, J., Zinke, A., Krüger, B., et al. 2011. Motion Reconstruction Using Sparse Accelerometer Data. ACM Transactions on Graphics 30, 3.
Export
BibTeX
@article{MotionReconstruction_TOG,
  title     = {Motion Reconstruction Using Sparse Accelerometer Data},
  author    = {Tautges, Jochen and Zinke, Arno and Kr{\"u}ger, Bj{\"o}rn and Baumann, Jan and Weber, Andreas and Helten, Thomas and M{\"u}ller, Meinard and Seidel, Hans-Peter and Eberhardt, Bernd},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1966394.1966397},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2011},
  date      = {2011},
  journal   = {ACM Transactions on Graphics},
  volume    = {30},
  number    = {3},
  pages     = {1--12},
  eid       = {18},
}
Endnote
%0 Journal Article %A Tautges, Jochen %A Zinke, Arno %A Kr&#252;ger, Bj&#246;rn %A Baumann, Jan %A Weber, Andreas %A Helten, Thomas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Eberhardt, Bernd %+ External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Motion Reconstruction Using Sparse Accelerometer Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13D8-A %F EDOC: 618860 %R 10.1145/1966394.1966397 %7 2011 %D 2011 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 30 %N 3 %& 1 %P 1 - 12 %Z sequence number: 18 %I ACM %C New York, NY %@ false
Sunkel, M., Jansen, S., Wand, M., Eisemann, E., and Seidel, H.-P. 2011. Learning Line Features in 3D Geometry. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Export
BibTeX
@article{SunkelEG2011,
  title     = {Learning Line Features in {3D} Geometry},
  author    = {Sunkel, Martin and Jansen, Silke and Wand, Michael and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2011.01858.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {267--276},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Sunkel, Martin %A Jansen, Silke %A Wand, Michael %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Line Features in 3D Geometry : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13CD-4 %F EDOC: 618856 %R 10.1111/j.1467-8659.2011.01858.x %7 2011 %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 2 %& 267 %P 267 - 276 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 EG 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011
Strzodka, R., Shaheen, M., Pajak, D., and Seidel, H.-P. 2011. Cache Accurate Time Skewing in Iterative Stencil Computations. Proceedings of the 2011 International Conference on Parallel Processing (ICPP 2011), IEEE Computer Society.
Export
BibTeX
@inproceedings{DBLP:conf/icpp/StrzodkaSPS11,
  title     = {Cache Accurate Time Skewing in Iterative Stencil Computations},
  author    = {Strzodka, Robert and Shaheen, Mohammed and Pajak, Dawid and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4577-1336-1},
  doi       = {10.1109/ICPP.2011.47},
  publisher = {IEEE Computer Society},
  year      = {2011},
  date      = {2011},
  booktitle = {Proceedings of the 2011 International Conference on Parallel Processing (ICPP 2011)},
  editor    = {Gao, Guang R. and Tseng, Yu-Chee},
  pages     = {571--581},
  address   = {Taipei, Taiwan},
}
Endnote
%0 Conference Proceedings %A Strzodka, Robert %A Shaheen, Mohammed %A Pajak, Dawid %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Cache Accurate Time Skewing in Iterative Stencil Computations : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B4E-5 %R 10.1109/ICPP.2011.47 %D 2011 %B 2011 International Conference on Parallel Processing %Z date of event: 2011-09-13 - 2011-09-16 %C Taipei, Taiwan %B Proceedings of the 2011 International Conference on Parallel Processing %E Gao, Guang R.; Tseng, Yu-Chee %P 571 - 581 %I IEEE Computer Society %@ 978-1-4577-1336-1
Stoll, C., Hasler, N., Gall, J., Seidel, H.-P., and Theobalt, C. 2011. Fast Articulated Motion Tracking using a Sums of Gaussians Body Model. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{Stoll2011,
  title     = {Fast Articulated Motion Tracking using a Sums of {Gaussians} Body Model},
  author    = {Stoll, Carsten and Hasler, Nils and Gall, Juergen and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126338},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {951--958},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Hasler, Nils %A Gall, Juergen %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Articulated Motion Tracking using a Sums of Gaussians Body Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13BD-A %F EDOC: 618892 %R 10.1109/ICCV.2011.6126338 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 951 - 958 %I IEEE %@ 978-1-4577-1101-5
Scherbaum, K., Ritschel, T., Hullin, M., Thormählen, T., Blanz, V., and Seidel, H.-P. 2011. Computer-suggested Facial Makeup. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Export
BibTeX
@article{Scherbaum2011makeup,
  title     = {Computer-suggested Facial Makeup},
  author    = {Scherbaum, Kristina and Ritschel, Tobias and Hullin, Matthias and Thorm{\"a}hlen, Thorsten and Blanz, Volker and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2011.01874.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {485--492},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Scherbaum, Kristina %A Ritschel, Tobias %A Hullin, Matthias %A Thorm&#228;hlen, Thorsten %A Blanz, Volker %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computer-suggested Facial Makeup : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13AA-3 %F EDOC: 618869 %R 10.1111/j.1467-8659.2011.01874.x %7 2011 %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 2 %& 485 %P 485 - 492 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Saleem, W., Belyaev, A., Wang, D., and Seidel, H.-P. 2011. On Visual Complexity of 3D Shapes. Computers & Graphics 35, 3.
Export
BibTeX
@article{Saleem2011,
  title     = {On Visual Complexity of {3D} Shapes},
  author    = {Saleem, Waqar and Belyaev, Alexander and Wang, Danyi and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0097-8493},
  doi       = {10.1016/j.cag.2011.03.006},
  publisher = {Elsevier},
  address   = {Amsterdam},
  year      = {2011},
  date      = {2011},
  journal   = {Computers \& Graphics},
  volume    = {35},
  number    = {3},
  pages     = {580--585},
}
Endnote
%0 Journal Article %A Saleem, Waqar %A Belyaev, Alexander %A Wang, Danyi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Visual Complexity of 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13E1-3 %F EDOC: 618927 %R 10.1016/j.cag.2011.03.006 %7 2011 %D 2011 %* Review method: peer-reviewed %J Computers & Graphics %V 35 %N 3 %& 580 %P 580 - 585 %I Elsevier %C Amsterdam %@ false
Ritschel, T., Eisemann, E., Ha, I., Kim, J.D.K., and Seidel, H.-P. 2011. Making Imperfect Shadow Maps View-adaptive: High-quality Global Illumination in Large Dynamic Scenes. Computer Graphics Forum 30, 8.
Export
BibTeX
@article{Ritschel2011,
  title     = {Making Imperfect Shadow Maps View-adaptive: High-quality Global Illumination in Large Dynamic Scenes},
  author    = {Ritschel, Tobias and Eisemann, Elmar and Ha, Inwoo and Kim, James D. K. and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2011.01998.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  journal   = {Computer Graphics Forum},
  volume    = {30},
  number    = {8},
  pages     = {2258--2269},
}
Endnote
%0 Journal Article %A Ritschel, Tobias %A Eisemann, Elmar %A Ha, Inwoo %A Kim, James D. K. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Making Imperfect Shadow Maps View-adaptive: High-quality Global Illumination in Large Dynamic Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13CF-F %F EDOC: 618926 %R 10.1111/j.1467-8659.2011.01998.x %7 2011 %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 8 %& 2258 %P 2258 - 2269 %I Blackwell %C Oxford, UK %@ false
Reshetouski, I., Manakov, A., Seidel, H.-P., and Ihrke, I. 2011. Three-dimensional Kaleidoscopic Imaging. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{Reshetouski2011,
  title     = {Three-dimensional Kaleidoscopic Imaging},
  author    = {Reshetouski, Ilya and Manakov, Alkhazur and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  isbn      = {978-1-4577-0394-2},
  doi       = {10.1109/CVPR.2011.5995579},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)},
  editor    = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal},
  pages     = {353--360},
  address   = {Colorado Springs, CO, USA},
}
Endnote
%0 Conference Proceedings %A Reshetouski, Ilya %A Manakov, Alkhazur %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Three-dimensional Kaleidoscopic Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13CB-8 %F EDOC: 618868 %R 10.1109/CVPR.2011.5995579 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 353 - 360 %I IEEE %@ 978-1-4577-0394-2
Pons-Moll, G., Baak, A., Gall, J., et al. 2011. Outdoor Human Motion Capture using Inverse Kinematics and von Mises-Fisher Sampling. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{PonsMollBaGaMuSeRo2011_OutdoorMocap_ICCV,
  title     = {Outdoor Human Motion Capture using Inverse Kinematics and von {Mises}-{Fisher} Sampling},
  author    = {Pons-Moll, Gerard and Baak, Andreas and Gall, J{\"u}rgen and Leal-Taix{\'e}, Laura and M{\"u}ller, Meinard and Seidel, Hans-Peter and Rosenhahn, Bodo},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126375},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {1243--1250},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Pons-Moll, Gerard %A Baak, Andreas %A Gall, J&#252;rgen %A Leal-Taix&#233;, Laura %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Rosenhahn, Bodo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Outdoor Human Motion Capture using Inverse Kinematics and von Mises-Fisher Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13E4-E %F EDOC: 618876 %R 10.1109/ICCV.2011.6126375 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 1243 - 1250 %I IEEE %@ 978-1-4577-1101-5
Pajak, D., Herzog, R., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2011. Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Abstract
In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware.
Export
BibTeX
@article{HerzogEG2011,
  title     = {Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming},
  author    = {Pajak, Dawid and Herzog, Robert and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/j.1467-8659.2011.01871.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  abstract  = {In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {415--424},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Pajak, Dawid %A Herzog, Robert %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13F2-E %F EDOC: 618866 %R 10.1111/j.1467-8659.2011.01871.x %7 2011 %D 2011 %* Review method: peer-reviewed %X In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. 
Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware. %J Computer Graphics Forum %V 30 %N 2 %& 415 %P 415 - 424 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Manakov, A., Seidel, H.-P., and Ihrke, I. 2011. A Mathematical Model and Calibration Procedure for Galvanometric Laser Scanning Systems. Vision, Modeling, and Visualization (VMV 2011), Eurographics Association.
Export
BibTeX
@inproceedings{Manakov2011,
  title     = {A Mathematical Model and Calibration Procedure for Galvanometric Laser Scanning Systems},
  author    = {Manakov, Alkhazur and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  isbn      = {978-3-905673-85-2},
  doi       = {10.2312/PE/VMV/VMV11/207-214},
  publisher = {Eurographics Association},
  year      = {2011},
  date      = {2011},
  booktitle = {Vision, Modeling, and Visualization (VMV 2011)},
  editor    = {Eisert, Peter and Hornegger, Joachim and Polthier, Konrad},
  pages     = {207--214},
  address   = {Berlin, Germany},
}
Endnote
%0 Conference Proceedings %A Manakov, Alkhazur %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Mathematical Model and Calibration Procedure for Galvanometric Laser Scanning Systems : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1381-E %F EDOC: 618883 %R 10.2312/PE/VMV/VMV11/207-214 %D 2011 %B 16th International Workshop on Vision, Modeling and Visualization %Z date of event: 2011-10-04 - 2011-10-06 %C Berlin, Germany %B Vision, Modeling, and Visualization %E Eisert, Peter; Hornegger, Joachim; Polthier, Konrad %P 207 - 214 %I Eurographics Association %@ 978-3-905673-85-2
Liu, Y., Stoll, C., Gall, J., Seidel, H.-P., and Theobalt, C. 2011. Markerless Motion Capture of Interacting Characters Using Multi-view Image Segmentation. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{LiuCVPR2011,
  title     = {Markerless Motion Capture of Interacting Characters Using Multi-view Image Segmentation},
  author    = {Liu, Yebin and Stoll, Carsten and Gall, J{\"u}rgen and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4577-0394-2},
  doi       = {10.1109/CVPR.2011.5995424},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)},
  editor    = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal},
  pages     = {1249--1256},
  address   = {Colorado Springs, CO, USA},
}
Endnote
%0 Conference Proceedings %A Liu, Yebin %A Stoll, Carsten %A Gall, J&#252;rgen %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture of Interacting Characters Using Multi-view Image Segmentation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13D1-7 %F EDOC: 618867 %R 10.1109/CVPR.2011.5995424 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 1249 - 1256 %I IEEE %@ 978-1-4577-0394-2
Lasowski, R., Tevs, A., Wand, M., and Seidel, H.-P. 2011. Wavelet Belief Propagation for Large Scale Inference Problems. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{LaTeWaSeCVPR11,
  title     = {Wavelet Belief Propagation for Large Scale Inference Problems},
  author    = {Lasowski, Ruxandra and Tevs, Art and Wand, Michael and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4577-0394-2},
  doi       = {10.1109/CVPR.2011.5995489},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)},
  editor    = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal},
  pages     = {1921--1928},
  address   = {Colorado Springs, CO, USA},
}
Endnote
%0 Conference Proceedings %A Lasowski, Ruxandra %A Tevs, Art %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Wavelet Belief Propagation for Large Scale Inference Problems : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-141B-C %F EDOC: 618865 %R 10.1109/CVPR.2011.5995489 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 1921 - 1928 %I IEEE %@ 978-1-4577-0394-2
Kurz, C., Thormählen, T., and Seidel, H.-P. 2011a. Visual Fixation for 3D Video Stabilization. Journal of Virtual Reality and Broadcasting 8, 2.
Abstract
Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. Furthermore, different methods of additional user interaction are investigated. It is shown that the stabilization process can easily be controlled and that it can be combined with state-of-the-art tracking techniques in order to obtain a powerful image stabilization tool. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes.
Export
BibTeX
@article{Kurz2010jvrb,
  TITLE    = {Visual Fixation for {3D} Video Stabilization},
  AUTHOR   = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN     = {1860-2037},
  URL      = {urn:nbn:de:0009-6-28222},
  DOI      = {10.20385/1860-2037/8.2011.2},
  YEAR     = {2011},
  ABSTRACT = {Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. Furthermore, different methods of additional user interaction are investigated. It is shown that the stabilization process can easily be controlled and that it can be combined with state-of-the-art tracking techniques in order to obtain a powerful image stabilization tool. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes.},
  JOURNAL  = {Journal of Virtual Reality and Broadcasting},
  VOLUME   = {8},
  NUMBER   = {2},
  PAGES    = {1--12},
}
Endnote
%0 Journal Article %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visual Fixation for 3D Video Stabilization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1419-0 %F EDOC: 618853 %U urn:nbn:de:0009-6-28222 %R 10.20385/1860-2037/8.2011.2 %7 2011-01-31 %D 2011 %8 31.01.2011 %X Visual fixation is employed by humans and some animals to keep a specific 3D <br>location at the center of the visual gaze. Inspired by this phenomenon in <br>nature, this paper explores the idea to transfer this mechanism to the context <br>of video stabilization for a hand-held video camera. A novel approach is <br>presented that stabilizes a video by fixating on automatically extracted 3D <br>target points. This approach is different from existing automatic solutions <br>that stabilize the video by smoothing. To determine the 3D target points, the <br>recorded scene is analyzed with a state-of-the-art structure-from-motion <br>algorithm, which estimates camera motion and reconstructs a 3D point cloud of <br>the static scene objects. Special algorithms are presented that search either <br>virtual or real 3D target points, which back-project close to the center of the <br>image for as long a period of time as possible. The stabilization algorithm <br>then transforms the original images of the sequence so that these 3D target <br>points are kept exactly in the center of the image, which, in case of real 3D <br>target points, produces a perfectly stable result at the image center.<br>Furthermore, different methods of additional user interaction are investigated. 
<br>It is shown that the stabilization process can easily be controlled and that it <br>can be combined with state-of-the-art tracking techniques in order to obtain a <br>powerful image stabilization tool.<br>The approach is evaluated on a variety of videos taken with a hand-held camera <br>in natural scenes. %J Journal of Virtual Reality and Broadcasting %V 8 %N 2 %& 1 %P 1 - 12 %@ false
Kurz, C., Thormählen, T., and Seidel, H.-P. 2011b. Bundle Adjustment for Stereoscopic 3D. Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011), Springer.
Abstract
The recent resurgence of stereoscopic 3D films has triggered a high demand for <br>post-processing tools for stereoscopic image sequences.<br>Camera motion estimation, also known as structure-from-motion (SfM) or <br>match-moving, is an essential step in the post-processing pipeline. In order to <br>ensure a high accuracy of the estimated camera parameters, a bundle adjustment <br>algorithm should be employed. We present a new stereo camera model for bundle <br>adjustment. It is designed to be applicable to a wide range of cameras employed <br>in today's movie productions. In addition, we describe how the model can be <br>integrated efficiently into the sparse bundle adjustment framework, enabling <br>the processing of stereoscopic image sequences with traditional efficiency and <br>improved accuracy.<br>Our camera model is validated by synthetic experiments, on rendered sequences, <br>and on a variety of real-world video sequences.
Export
BibTeX
@inproceedings{Kurz2011,
  TITLE     = {Bundle Adjustment for Stereoscopic {3D}},
  AUTHOR    = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-24135-2},
  DOI       = {10.1007/978-3-642-24136-9_1},
  PUBLISHER = {Springer},
  YEAR      = {2011},
  DATE      = {2011},
  ABSTRACT  = {The recent resurgence of stereoscopic 3D films has triggered a high demand for post-processing tools for stereoscopic image sequences. Camera motion estimation, also known as structure-from-motion (SfM) or match-moving, is an essential step in the post-processing pipeline. In order to ensure a high accuracy of the estimated camera parameters, a bundle adjustment algorithm should be employed. We present a new stereo camera model for bundle adjustment. It is designed to be applicable to a wide range of cameras employed in today's movie productions. In addition, we describe how the model can be integrated efficiently into the sparse bundle adjustment framework, enabling the processing of stereoscopic image sequences with traditional efficiency and improved accuracy. Our camera model is validated by synthetic experiments, on rendered sequences, and on a variety of real-world video sequences.},
  BOOKTITLE = {Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011)},
  EDITOR    = {Gagalowicz, Andr{\'e} and Philips, Wilfried},
  PAGES     = {1--12},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {6930},
  ADDRESS   = {Rocquencourt, France},
}
Endnote
%0 Conference Proceedings %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Bundle Adjustment for Stereoscopic 3D : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-139D-1 %F EDOC: 618888 %R 10.1007/978-3-642-24136-9_1 %D 2011 %B 5th International Conference on Computer Vision / Computer Graphics Collaboration Techniques and Applications %Z date of event: 2011-10-10 - 2011-10-11 %C Rocquencourt, France %X The recent resurgence of stereoscopic 3D films has triggered a high demand for <br>post-processing tools for stereoscopic image sequences.<br>Camera motion estimation, also known as structure-from-motion (SfM) or <br>match-moving, is an essential step in the post-processing pipeline. In order to <br>ensure a high accuracy of the estimated camera parameters, a bundle adjustment <br>algorithm should be employed. We present a new stereo camera model for bundle <br>adjustment. It is designed to be applicable to a wide range of cameras employed <br>in today's movie productions. In addition, we describe how the model can be <br>integrated efficiently into the sparse bundle adjustment framework, enabling <br>the processing of stereoscopic image sequences with traditional efficiency and <br>improved accuracy.<br>Our camera model is validated by synthetic experiments, on rendered sequences, <br>and on a variety of real-world video sequences. %B Computer Vision / Computer Graphics Collaboration Techniques %E Gagalowicz, Andr&#233;; Philips, Wilfried %P 1 - 12 %I Springer %@ 978-3-642-24135-2 %B Lecture Notes in Computer Science %N 6930 %U https://rdcu.be/dJxGq
Kosov, S., Thormählen, T., and Seidel, H.-P. 2011. Using Active Illumination for Accurate Variational Space-time Stereo. Image Analysis (SCIA 2011), Springer.
Export
BibTeX
@inproceedings{DBLP:conf/scia/KosovTS11,
  TITLE     = {Using Active Illumination for Accurate Variational Space-time Stereo},
  AUTHOR    = {Kosov, Sergey and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-21226-0},
  DOI       = {10.1007/978-3-642-21227-7_70},
  PUBLISHER = {Springer},
  YEAR      = {2011},
  DATE      = {2011},
  BOOKTITLE = {Image Analysis (SCIA 2011)},
  EDITOR    = {Heyden, Anders and Kahl, Fredrik},
  PAGES     = {752--763},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {6688},
  ADDRESS   = {Ystad, Sweden},
}
Endnote
%0 Conference Proceedings %A Kosov, Sergey %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Active Illumination for Accurate Variational Space-time Stereo : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B42-1 %R 10.1007/978-3-642-21227-7_70 %D 2011 %B 17th Scandinavian Conference on Image Analysis %Z date of event: 2011-05-01 - 2011-05-01 %C Ystad, Sweden %B Image Analysis %E Heyden, Anders; Kahl, Fredrik %P 752 - 763 %I Springer %@ 978-3-642-21226-0 %B Lecture Notes in Computer Science %N 6688 %U https://rdcu.be/dJxAW
Klehm, O., Ritschel, T., Eisemann, E., and Seidel, H.-P. 2011. Bent Normals and Cones in Screen-space. Vision, Modeling, and Visualization (VMV 2011), Eurographics Association.
Abstract
Ambient occlusion (AO) is a popular technique for real-time as well as offline rendering. One of its benefits is a gain in efficiency due to the fact that occlusion and shading are decoupled which results in an average occlusion that modulates the surface shading. Its main drawback is a loss of realism due to the lack of directional occlusion and lighting. As a solution, the use of bent normals was proposed for offline rendering. This work describes how to compute bent normals and bent cones in combination with screen-space ambient occlusion. These extensions combine the speed and simplicity of AO with physically more plausible lighting.
Export
BibTeX
@inproceedings{SsbcVMVKlehm2011,
  TITLE     = {Bent Normals and Cones in Screen-space},
  AUTHOR    = {Klehm, Oliver and Ritschel, Tobias and Eisemann, Elmar and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-85-2},
  DOI       = {10.2312/PE/VMV/VMV11/177-182},
  LOCALID   = {Local-ID: C125675300671F7B-553297BEE234DBA3C1257922004C3F89-SsbcVMVKlehm2011},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2011},
  DATE      = {2011},
  ABSTRACT  = {Ambient occlusion (AO) is a popular technique for real-time as well as offline rendering. One of its benefits is a gain in efficiency due to the fact that occlusion and shading are decoupled which results in an average occlusion that modulates the surface shading. Its main drawback is a loss of realism due to the lack of directional occlusion and lighting. As a solution, the use of bent normals was proposed for offline rendering. This work describes how to compute bent normals and bent cones in combination with screen-space ambient occlusion. These extensions combine the speed and simplicity of AO with physically more plausible lighting.},
  BOOKTITLE = {Vision, Modeling, and Visualization (VMV 2011)},
  EDITOR    = {Eisert, Peter and Hornegger, Joachim and Polthier, Konrad},
  PAGES     = {177--182},
  ADDRESS   = {Berlin, Germany},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Ritschel, Tobias %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Bent Normals and Cones in Screen-space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-139B-5 %F EDOC: 618877 %R 10.2312/PE/VMV/VMV11/177-182 %F OTHER: Local-ID: C125675300671F7B-553297BEE234DBA3C1257922004C3F89-SsbcVMVKlehm2011 %D 2011 %B 16th International Workshop on Vision, Modeling and Visualization %Z date of event: 2011-10-04 - 2011-10-06 %C Berlin, Germany %X Ambient occlusion (AO) is a popular technique for real-time as well as offline rendering. One of its benefits is a gain in efficiency due to the fact that occlusion and shading are decoupled which results in an average occlusion that modulates the surface shading. Its main drawback is a loss of realism due to the lack of directional occlusion and lighting. As a solution, the use of bent normals was proposed for offline rendering. This work describes how to compute bent normals and bent cones in combination with screen-space ambient occlusion. These extensions combine the speed and simplicity of AO with physically more plausible lighting. %B Vision, Modeling, and Visualization %E Eisert, Peter; Hornegger, Joachim; Polthier, Konrad %P 177 - 182 %I Eurographics Association %@ 978-3-905673-85-2
Kerber, J., Wand, M., Krüger, J., and Seidel, H.-P. 2011. Partial Symmetry Detection in Volume Data. Vision, Modeling, and Visualization (VMV 2011), Eurographics Association.
Abstract
In this paper, we present an algorithm for detecting partial Euclidean symmetries in volume data. Our algorithm finds subsets in voxel data that map to each other approximately under translations, rotations, and reflections. We implement the search for partial symmetries efficiently and robustly using a feature-based approach: We first reduce the volume to salient line features and then create transformation candidates from matching only local configurations of these line networks. Afterwards, only a shortlist of transformation candidates need to be verified using expensive dense volume matching. We apply our technique on both synthetic test scenes as well as real CT scans and show that we can recover a large amount of partial symmetries for complexly structured volume data sets.
Export
BibTeX
@inproceedings{Kerber2011_1,
  TITLE     = {Partial Symmetry Detection in Volume Data},
  AUTHOR    = {Kerber, Jens and Wand, Michael and Kr{\"u}ger, Jens and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-85-2},
  DOI       = {10.2312/PE/VMV/VMV11/041-048},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2011},
  DATE      = {2011},
  ABSTRACT  = {In this paper, we present an algorithm for detecting partial Euclidean symmetries in volume data. Our algorithm finds subsets in voxel data that map to each other approximately under translations, rotations, and reflections. We implement the search for partial symmetries efficiently and robustly using a feature-based approach: We first reduce the volume to salient line features and then create transformation candidates from matching only local configurations of these line networks. Afterwards, only a shortlist of transformation candidates need to be verified using expensive dense volume matching. We apply our technique on both synthetic test scenes as well as real CT scans and show that we can recover a large amount of partial symmetries for complexly structured volume data sets.},
  BOOKTITLE = {Vision, Modeling, and Visualization (VMV 2011)},
  EDITOR    = {Eisert, Peter and Hornegger, Joachim and Polthier, Konrad},
  PAGES     = {41--48},
  ADDRESS   = {Berlin, Germany},
}
Endnote
%0 Conference Proceedings %A Kerber, Jens %A Wand, Michael %A Kr&#252;ger, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Partial Symmetry Detection in Volume Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13E9-4 %F EDOC: 618878 %R 10.2312/PE/VMV/VMV11/041-048 %D 2011 %B 16th International Workshop on Vision, Modeling and Visualization %Z date of event: 2011-10-04 - 2011-10-06 %C Berlin, Germany %X In this paper, we present an algorithm for detecting partial Euclidean symmetries in volume data. Our algorithm finds subsets in voxel data that map to each other approximately under translations, rotations, and reflections. We implement the search for partial symmetries efficiently and robustly using a feature-based approach: We first reduce the volume to salient line features and then create transformation candidates from matching only local configurations of these line networks. Afterwards, only a shortlist of transformation candidates need to be verified using expensive dense volume matching. We apply our technique on both synthetic test scenes as well as real CT scans and show that we can recover a large amount of partial symmetries for complexly structured volume data sets. %B Vision, Modeling, and Visualization %E Eisert, Peter; Hornegger, Joachim; Polthier, Konrad %P 41 - 48 %I Eurographics Association %@ 978-3-905673-85-2
Hullin, M.B., Lensch, H.P.A., Raskar, R., Seidel, H.-P., and Ihrke, I. 2011a. A dynamic BRDF display. SIGGRAPH ’11: ACM SIGGRAPH 2011 Emerging Technologies, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/HullinLRSI11,
  TITLE     = {A Dynamic {BRDF} Display},
  AUTHOR    = {Hullin, Matthias B. and Lensch, Hendrik P. A. and Raskar, Ramesh and Seidel, Hans-Peter and Ihrke, Ivo},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-0969-1},
  DOI       = {10.1145/2048259.2048260},
  PUBLISHER = {ACM},
  YEAR      = {2011},
  DATE      = {2011},
  BOOKTITLE = {SIGGRAPH '11: ACM SIGGRAPH 2011 Emerging Technologies},
  EDITOR    = {Krumbholz, Cole},
  PAGES     = {1--1},
  ADDRESS   = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings %A Hullin, Matthias B. %A Lensch, Hendrik P. A. %A Raskar, Ramesh %A Seidel, Hans-Peter %A Ihrke, Ivo %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A dynamic BRDF display : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B40-3 %R 10.1145/2048259.2048260 %D 2011 %B ACM SIGGRAPH 2011 Emerging Technologies %Z date of event: 2011-08-07 - 2011-08-11 %C Vancouver, Canada %B SIGGRAPH '11: ACM SIGGRAPH 2011 Emerging Technologies %E Krumbholz, Cole %P 1 - 1 %I ACM %@ 978-1-4503-0969-1
Hullin, M.B., Eisemann, E., Seidel, H.-P., and Lee, S. 2011b. Physically-based Real-time Lens Flare Rendering. ACM Transactions on Graphics30, 4.
Export
BibTeX
@article{DBLP:journals/tog/HullinESL11,
  TITLE     = {Physically-based Real-time Lens Flare Rendering},
  AUTHOR    = {Hullin, Matthias B. and Eisemann, Elmar and Seidel, Hans-Peter and Lee, Sungkil},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2010324.1965003},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2011},
  DATE      = {2011},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {30},
  NUMBER    = {4},
  PAGES     = {108:1--108:9},
}
Endnote
%0 Journal Article %A Hullin, Matthias B. %A Eisemann, Elmar %A Seidel, Hans-Peter %A Lee, Sungkil %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Physically-based Real-time Lens Flare Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B95-3 %R 10.1145/2010324.1965003 %D 2011 %J ACM Transactions on Graphics %V 30 %N 4 %& 108:1 %P 108:1 - 108:9 %I Association for Computing Machinery %C New York, NY %@ false
Hullin, M.B., Lensch, H.P.A., Raskar, R., Seidel, H.-P., and Ihrke, I. 2011c. Dynamic Display of BRDFs. Computer Graphics Forum (Proc. EUROGRAPHICS 2011)30, 2.
Export
BibTeX
@article{Hullin2011,
  TITLE     = {Dynamic Display of {BRDFs}},
  AUTHOR    = {Hullin, Matthias B. and Lensch, Hendrik P. A. and Raskar, Ramesh and Seidel, Hans-Peter and Ihrke, Ivo},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2011.01891.x},
  PUBLISHER = {North Holland},
  ADDRESS   = {Amsterdam},
  YEAR      = {2011},
  DATE      = {2011},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {30},
  NUMBER    = {2},
  PAGES     = {475--483},
  BOOKTITLE = {EUROGRAPHICS 2011},
}
Endnote
%0 Journal Article %A Hullin, Matthias B. %A Lensch, Hendrik P. A. %A Raskar, Ramesh %A Seidel, Hans-Peter %A Ihrke, Ivo %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Display of BRDFs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13B2-F %F EDOC: 618859 %R 10.1111/j.1467-8659.2011.01891.x %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 2 %& 475 %P 475 - 483 %I North Holland %C Amsterdam %@ false %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 EG 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011
Helten, T., Müller, M., Tautges, J., Weber, A., and Seidel, H.-P. 2011a. Towards Cross-modal Comparison of Human Motion Data. Pattern Recognition (DAGM 2011), Springer.
Export
BibTeX
@inproceedings{HeltenMTWS11_Cross-modalComparison,
  TITLE     = {Towards Cross-modal Comparison of Human Motion Data},
  AUTHOR    = {Helten, Thomas and M{\"u}ller, Meinard and Tautges, Jochen and Weber, Andreas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-23122-3},
  DOI       = {10.1007/978-3-642-23123-0_7},
  PUBLISHER = {Springer},
  YEAR      = {2011},
  DATE      = {2011},
  BOOKTITLE = {Pattern Recognition (DAGM 2011)},
  EDITOR    = {Mester, Rudolf and Felsberg, Michael},
  PAGES     = {61--70},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {6835},
  ADDRESS   = {Frankfurt/Main, Germany},
}
Endnote
%0 Conference Proceedings %A Helten, Thomas %A M&#252;ller, Meinard %A Tautges, Jochen %A Weber, Andreas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Towards Cross-modal Comparison of Human Motion Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-140F-8 %F EDOC: 618900 %R 10.1007/978-3-642-23123-0_7 %D 2011 %B 33rd Annual Symposium of the German Association for Pattern Recognition %Z date of event: 2011-08-30 - 2011-09-02 %C Frankfurt/Main, Germany %B Pattern Recognition %E Mester, Rudolf; Felsberg, Michael %P 61 - 70 %I Springer %@ 978-3-642-23122-3 %B Lecture Notes in Computer Science %N 6835 %U https://rdcu.be/dJx4h
Helten, T., Brock, H., Müller, M., and Seidel, H.-P. 2011b. Classification of Trampoline Jumps Using Inertial Sensors. Sports Engineering14, 2-4.
Export
BibTeX
@article{HeltenBMS11_ClassificationTrampolineJumps_SE,
  TITLE     = {Classification of Trampoline Jumps Using Inertial Sensors},
  AUTHOR    = {Helten, Thomas and Brock, Heike and M{\"u}ller, Meinard and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1007/s12283-011-0081-4},
  PUBLISHER = {Springer},
  ADDRESS   = {New York, NY},
  YEAR      = {2011},
  DATE      = {2011},
  JOURNAL   = {Sports Engineering},
  VOLUME    = {14},
  NUMBER    = {2-4},
  PAGES     = {155--164},
}
Endnote
%0 Journal Article %A Helten, Thomas %A Brock, Heike %A M&#252;ller, Meinard %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Classification of Trampoline Jumps Using Inertial Sensors : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13A3-2 %F EDOC: 618898 %R 10.1007/s12283-011-0081-4 %7 2011 %D 2011 %* Review method: peer-reviewed %J Sports Engineering %V 14 %N 2-4 %& 155 %P 155 - 164 %I Springer %C New York, NY
Grochulla, M., Thormählen, T., and Seidel, H.-P. 2011. Using Spatially Distributed Patterns for Multiple View Camera Calibration. Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011), Springer.
Abstract
This paper presents an approach to intrinsic and extrinsic camera parameter <br>calibration from a series of photographs or from video. For the reliable and <br>accurate estimation of camera parameters it is common to use specially designed <br>calibration patterns. However, using a single pattern, a globally consistent <br>calibration is only possible from positions and viewing directions from where <br>this single pattern is visible. To overcome this problem, the presented <br>approach uses multiple coded patterns that can be distributed over a large <br>area. A connection graph representing visible patterns in multiple views is <br>generated, which is used to estimate globally consistent camera parameters for <br>the complete scene. The approach is evaluated on synthetic and real-world <br>ground truth examples. Furthermore, the approach is applied to calibrate the <br>stereo-cameras of a robotic head on a moving platform.
Export
BibTeX
@inproceedings{Grochulla2011,
  TITLE     = {Using Spatially Distributed Patterns for Multiple View Camera Calibration},
  AUTHOR    = {Grochulla, Martin and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-24135-2},
  DOI       = {10.1007/978-3-642-24136-9_10},
  PUBLISHER = {Springer},
  YEAR      = {2011},
  DATE      = {2011},
  ABSTRACT  = {This paper presents an approach to intrinsic and extrinsic camera parameter calibration from a series of photographs or from video. For the reliable and accurate estimation of camera parameters it is common to use specially designed calibration patterns. However, using a single pattern, a globally consistent calibration is only possible from positions and viewing directions from where this single pattern is visible. To overcome this problem, the presented approach uses multiple coded patterns that can be distributed over a large area. A connection graph representing visible patterns in multiple views is generated, which is used to estimate globally consistent camera parameters for the complete scene. The approach is evaluated on synthetic and real-world ground truth examples. Furthermore, the approach is applied to calibrate the stereo-cameras of a robotic head on a moving platform.},
  BOOKTITLE = {Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011)},
  EDITOR    = {Gagalowicz, Andr{\'e} and Philips, Wilfried},
  PAGES     = {110--121},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {6930},
  ADDRESS   = {Rocquencourt, France},
}
Endnote
%0 Conference Proceedings %A Grochulla, Martin %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Spatially Distributed Patterns for Multiple View Camera Calibration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1414-9 %F EDOC: 618889 %R 10.1007/978-3-642-24136-9_10 %D 2011 %B 5th International Conference on Computer Vision / Computer Graphics Collaboration Techniques and Applications %Z date of event: 2011-10-10 - 2011-10-11 %C Rocquencourt, France %X This paper presents an approach to intrinsic and extrinsic camera parameter <br>calibration from a series of photographs or from video. For the reliable and <br>accurate estimation of camera parameters it is common to use specially designed <br>calibration patterns. However, using a single pattern, a globally consistent <br>calibration is only possible from positions and viewing directions from where <br>this single pattern is visible. To overcome this problem, the presented <br>approach uses multiple coded patterns that can be distributed over a large <br>area. A connection graph representing visible patterns in multiple views is <br>generated, which is used to estimate globally consistent camera parameters for <br>the complete scene. The approach is evaluated on synthetic and real-world <br>ground truth examples. Furthermore, the approach is applied to calibrate the <br>stereo-cameras of a robotic head on a moving platform. %B Computer Vision / Computer Graphics Collaboration Techniques %E Gagalowicz, Andr&#233;; Philips, Wilfried %P 110 - 121 %I Springer %@ 978-3-642-24135-2 %B Lecture Notes in Computer Science %N 6930 %U https://rdcu.be/dJxDk
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2011. A Perceptual Model for Disparity. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2011)30, 4.
Abstract
Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study.
Export
BibTeX
@article{DidykREMS2011, TITLE = {A Perceptual Model for Disparity}, AUTHOR = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2010324.1964991}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {30}, NUMBER = {4}, PAGES = {1--10}, EID = {96}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2011}, }
Endnote
%0 Journal Article %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Model for Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1388-F %F EDOC: 618890 %R 10.1145/2010324.1964991 %7 2011 %D 2011 %* Review method: peer-reviewed %X Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study. %J ACM Transactions on Graphics %V 30 %N 4 %& 1 %P 1 - 10 %Z sequence number: 96 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2011 %O ACM SIGGRAPH 2011 Vancouver, BC, Canada
Čadík, M., Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2011. On Evaluation of Video Quality Metrics: an HDR Dataset for Computer Graphics Applications. Human Vision and Electronic Imaging XVI (HVEI 2011), SPIE.
Export
BibTeX
@inproceedings{Cadik2011, TITLE = {On Evaluation of Video Quality Metrics: an {HDR} Dataset for Computer Graphics Applications}, AUTHOR = {{\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-8194-8402-4}, URL = {http://dx.doi.org/10.1117/12.878875}, DOI = {10.1117/12.878875}, PUBLISHER = {SPIE}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {Human Vision and Electronic Imaging XVI (HVEI 2011)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.}, PAGES = {1--9}, EID = {78650R}, SERIES = {Proceedings of SPIE}, VOLUME = {7865}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Evaluation of Video Quality Metrics: an HDR Dataset for Computer Graphics Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13DF-B %F EDOC: 618862 %R 10.1117/12.878875 %U http://dx.doi.org/10.1117/12.878875 %D 2011 %B Human Vision and Electronic Imaging XVI %Z date of event: 2011-01-24 - 2011-01-27 %C San Francisco, CA, USA %B Human Vision and Electronic Imaging XVI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 1 - 9 %Z sequence number: 78650R %I SPIE %@ 978-0-8194-8402-4 %B Proceedings of SPIE %N 7865
Bokeloh, M., Wand, M., Koltun, V., and Seidel, H.-P. 2011a. Pattern-aware Shape Deformation using Sliding Dockers. ACM Transactions on Graphics30, 6.
Export
BibTeX
@article{DBLP:journals/tog/BokelohWKS11, TITLE = {Pattern-aware Shape Deformation using Sliding Dockers}, AUTHOR = {Bokeloh, Martin and Wand, Michael and Koltun, Vladlen and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2070781.2024157}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2011}, DATE = {2011}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {30}, NUMBER = {6}, PAGES = {123:1--123:10}, }
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Koltun, Vladlen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Pattern-aware Shape Deformation using Sliding Dockers : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B8E-C %R 10.1145/2070781.2024157 %D 2011 %J ACM Transactions on Graphics %V 30 %N 6 %& 123:1 %P 123:1 - 123:10 %I Association for Computing Machinery %C New York, NY %@ false
Bokeloh, M., Wand, M., Koltun, V., and Seidel, H.-P. 2011b. Pattern-aware Deformation Using Sliding Dockers. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2011)30, 6.
Export
BibTeX
@article{Bokeloh2011, TITLE = {Pattern-aware Deformation Using Sliding Dockers}, AUTHOR = {Bokeloh, Martin and Wand, Michael and Koltun, Vladlen and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-0807-6}, DOI = {10.1145/2070781.2024157}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2011}, DATE = {2011}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {30}, NUMBER = {6}, PAGES = {1--10}, EID = {123}, BOOKTITLE = {Proceedings of the 2011 SIGGRAPH Asia Conference (ACM SIGGRAPH Asia 2011)}, }
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Koltun, Vladlen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Pattern-aware Deformation Using Sliding Dockers : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13EB-F %F EDOC: 618887 %R 10.1145/2070781.2024157 %D 2011 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 30 %N 6 %& 1 %P 1 - 10 %Z sequence number: 123 %I ACM %C New York, NY %@ false %B Proceedings of the 2011 SIGGRAPH Asia Conference %O ACM SIGGRAPH Asia 2011 SA'11 ; Hong Kong, China SA 2011 %@ 978-1-4503-0807-6
Berner, A., Wand, M., Mitra, N.J., Mewes, D., and Seidel, H.-P. 2011a. Shape Analysis with Subspace Symmetries. Computer Graphics Forum (Proc. EUROGRAPHICS 2011)30, 2.
Abstract
We address the problem of partial symmetry detection, i.e., the identification of building blocks a complex shape is composed of. Previous techniques identify parts that relate to each other by simple rigid mappings, similarity transforms, or, more recently, intrinsic isometries. Our approach generalizes the notion of partial symmetries to more general deformations. We introduce subspace symmetries whereby we characterize similarity by requiring the set of symmetric parts to form a low dimensional shape space. We present an algorithm to discover subspace symmetries based on detecting linearly correlated correspondences among graphs of invariant features. The detected subspace symmetries along with the modeled variations are useful for a variety of applications including shape completion, non-local and non-rigid denoising. We evaluate our technique on various data sets. We show that for models with pronounced surface features, subspace symmetries can be found fully automatically. For complicated cases, a small amount of user input is used to resolve ambiguities. Our technique computes dense correspondences that can subsequently be used in various applications, such as model repair and denoising.
Export
BibTeX
@article{Berner2011SubspaceSymmetry, TITLE = {Shape Analysis with Subspace Symmetries}, AUTHOR = {Berner, Alexander and Wand, Michael and Mitra, Niloy J. and Mewes, Daniel and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, URL = {http://www.mpi-inf.mpg.de/%7Eaberner/subspace_symmetry_eg11.pdf}, DOI = {10.1111/j.1467-8659.2011.01859.x}, PUBLISHER = {North Holland}, ADDRESS = {Amsterdam}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {We address the problem of partial symmetry detection, i.e., the identification of building blocks a complex shape is composed of. Previous techniques identify parts that relate to each other by simple rigid mappings, similarity transforms, or, more recently, intrinsic isometries. Our approach generalizes the notion of partial symmetries to more general deformations. We introduce subspace symmetries whereby we characterize similarity by requiring the set of symmetric parts to form a low dimensional shape space. We present an algorithm to discover subspace symmetries based on detecting linearly correlated correspondences among graphs of invariant features. The detected subspace symmetries along with the modeled variations are useful for a variety of applications including shape completion, non-local and non-rigid denoising. We evaluate our technique on various data sets. We show that for models with pronounced surface features, subspace symmetries can be found fully automatically. For complicated cases, a small amount of user input is used to resolve ambiguities. Our technique computes dense correspondences that can subsequently be used in various applications, such as model repair and denoising.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {30}, NUMBER = {2}, PAGES = {277--286}, BOOKTITLE = {EUROGRAPHICS 2011}, }
Endnote
%0 Journal Article %A Berner, Alexander %A Wand, Michael %A Mitra, Niloy J. %A Mewes, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shape Analysis with Subspace Symmetries : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13FA-D %F EDOC: 618857 %R 10.1111/j.1467-8659.2011.01859.x %U http://www.mpi-inf.mpg.de/%7Eaberner/subspace_symmetry_eg11.pdf %D 2011 %* Review method: peer-reviewed %X We address the problem of partial symmetry detection, i.e., the identification of building blocks a complex shape is composed of. Previous techniques identify parts that relate to each other by simple rigid mappings, similarity transforms, or, more recently, intrinsic isometries. Our approach generalizes the notion of partial symmetries to more general deformations. We introduce subspace symmetries whereby we characterize similarity by requiring the set of symmetric parts to form a low dimensional shape space. We present an algorithm to discover subspace symmetries based on detecting linearly correlated correspondences among graphs of invariant features. The detected subspace symmetries along with the modeled variations are useful for a variety of applications including shape completion, non-local and non-rigid denoising. We evaluate our technique on various data sets. We show that for models with pronounced surface features, subspace symmetries can be found fully automatically. For complicated cases, a small amount of user input is used to resolve ambiguities. Our technique computes dense correspondences that can subsequently be used in various applications, such as model repair and denoising. 
%J Computer Graphics Forum %V 30 %N 2 %& 277 %P 277 - 286 %I North Holland %C Amsterdam %@ false %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 EG 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011
Berner, A., Burghard, O., Wand, M., Mitra, N., Klein, R., and Seidel, H.-P. 2011b. A Morphable Part Model for Shape Manipulation. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We introduce morphable part models for smart shape manipulation using an assembly of deformable parts with appropriate boundary conditions. In an analysis phase, we characterize the continuous allowable variations both for the individual parts and their interconnections using Gaussian shape models with low rank covariance. The discrete aspect of how parts can be assembled is captured using a shape grammar. The parts and their interconnection rules are learned semi-automatically from symmetries within a single object or from semantically corresponding parts across a larger set of example models. The learned discrete and continuous structure is encoded as a graph. In the interaction phase, we obtain an interactive yet intuitive shape deformation framework producing realistic deformations on classes of objects that are difficult to edit using existing structure-aware deformation techniques. Unlike previous techniques, our method uses self-similarities from a single model as training input and allows the user to reassemble the identified parts in new configurations, thus exploiting both the discrete and continuous learned variations while ensuring appropriate boundary conditions across part boundaries.
Export
BibTeX
@techreport{BernerBurghardWandMitraKleinSeidel2011, TITLE = {A Morphable Part Model for Shape Manipulation}, AUTHOR = {Berner, Alexander and Burghard, Oliver and Wand, Michael and Mitra, Niloy and Klein, Reinhard and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2011-4-005}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {We introduce morphable part models for smart shape manipulation using an assembly of deformable parts with appropriate boundary conditions. In an analysis phase, we characterize the continuous allowable variations both for the individual parts and their interconnections using Gaussian shape models with low rank covariance. The discrete aspect of how parts can be assembled is captured using a shape grammar. The parts and their interconnection rules are learned semi-automatically from symmetries within a single object or from semantically corresponding parts across a larger set of example models. The learned discrete and continuous structure is encoded as a graph. In the interaction phase, we obtain an interactive yet intuitive shape deformation framework producing realistic deformations on classes of objects that are difficult to edit using existing structure-aware deformation techniques. Unlike previous techniques, our method uses self-similarities from a single model as training input and allows the user to reassemble the identified parts in new configurations, thus exploiting both the discrete and continuous learned variations while ensuring appropriate boundary conditions across part boundaries.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Berner, Alexander %A Burghard, Oliver %A Wand, Michael %A Mitra, Niloy %A Klein, Reinhard %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Morphable Part Model for Shape Manipulation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6972-0 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2011 %P 33 p. %X We introduce morphable part models for smart shape manipulation using an assembly of deformable parts with appropriate boundary conditions. In an analysis phase, we characterize the continuous allowable variations both for the individual parts and their interconnections using Gaussian shape models with low rank covariance. The discrete aspect of how parts can be assembled is captured using a shape grammar. The parts and their interconnection rules are learned semi-automatically from symmetries within a single object or from semantically corresponding parts across a larger set of example models. The learned discrete and continuous structure is encoded as a graph. In the interaction phase, we obtain an interactive yet intuitive shape deformation framework producing realistic deformations on classes of objects that are difficult to edit using existing structure-aware deformation techniques. Unlike previous techniques, our method uses self-similarities from a single model as training input and allows the user to reassemble the identified parts in new configurations, thus exploiting both the discrete and continuous learned variations while ensuring appropriate boundary conditions across part boundaries. %B Research Report %@ false
Baboud, L., Čadík, M., Eisemann, E., and Seidel, H.-P. 2011. Automatic Photo-to-Terrain Alignment for the Annotation of Mountain Pictures. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{Baboud2011, TITLE = {Automatic Photo-to-Terrain Alignment for the Annotation of Mountain Pictures}, AUTHOR = {Baboud, Lionel and {\v C}ad{\'i}k, Martin and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4577-0394-2}, DOI = {10.1109/CVPR.2011.5995727}, PUBLISHER = {IEEE}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)}, EDITOR = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal}, PAGES = {41--48}, ADDRESS = {Colorado Springs, CO, USA}, }
Endnote
%0 Conference Proceedings %A Baboud, Lionel %A &#268;ad&#237;k, Martin %A Eisemann, Elmar %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Photo-to-Terrain Alignment for the Annotation of Mountain Pictures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1393-6 %F EDOC: 618863 %R 10.1109/CVPR.2011.5995727 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 41 - 48 %I IEEE %@ 978-1-4577-0394-2
Baak, A., Müller, M., Bharaj, G., Seidel, H.-P., and Theobalt, C. 2011. A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{BaakMuBhSeTh2011_DataDrivenDepthTracking_ICCV, TITLE = {A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera}, AUTHOR = {Baak, Andreas and M{\"u}ller, Meinard and Bharaj, Gaurav and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-4577-1101-5}, DOI = {10.1109/ICCV.2011.6126356}, PUBLISHER = {IEEE}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {IEEE International Conference on Computer Vision (ICCV 2011)}, PAGES = {1092--1099}, ADDRESS = {Barcelona, Spain}, }
Endnote
%0 Conference Proceedings %A Baak, Andreas %A M&#252;ller, Meinard %A Bharaj, Gaurav %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-137E-7 %F EDOC: 618875 %R 10.1109/ICCV.2011.6126356 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 1092 - 1099 %I IEEE %@ 978-1-4577-1101-5
2010
Yang, B., Dong, Z., Feng, J., Seidel, H.-P., and Kautz, J. 2010. Variance Soft Shadow Mapping. Computer Graphics Forum29, 7.
Abstract
We present variance soft shadow mapping (VSSM) for rendering plausible soft shadow in real-time. VSSM is based on the theoretical framework of percentage-closer soft shadows (PCSS) and exploits recent advances in variance shadow mapping (VSM). Our new formulation allows for the efficient computation of (average) blocker distances, a common bottleneck in PCSS-based methods. Furthermore, we avoid incorrectly lit pixels commonly encountered in VSM-based methods by appropriately subdividing the filter kernel. We demonstrate that VSSM renders high-quality soft shadows efficiently (usually over 100 fps) for complex scene settings. Its speed is at least one order of magnitude faster than PCSS for large penumbra.
Export
BibTeX
@article{Dong:2010:VSSM, TITLE = {Variance Soft Shadow Mapping}, AUTHOR = {Yang, Baoguang and Dong, Zhao and Feng, Jieqing and Seidel, Hans-Peter and Kautz, Jan}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/j.1467-8659.2010.01800.x}, PUBLISHER = {Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {We present variance soft shadow mapping (VSSM) for rendering plausible soft shadow in real-time. VSSM is based on the theoretical framework of percentage-closer soft shadows (PCSS) and exploits recent advances in variance shadow mapping (VSM). Our new formulation allows for the efficient computation of (average) blocker distances, a common bottleneck in PCSS-based methods. Furthermore, we avoid incorrectly lit pixels commonly encountered in VSM-based methods by appropriately subdividing the filter kernel. We demonstrate that VSSM renders highquality soft shadows efficiently (usually over 100 fps) for complex scene settings. Its speed is at least one order of magnitude faster than PCSS for large penumbra.}, JOURNAL = {Computer Graphics Forum}, VOLUME = {29}, NUMBER = {7}, PAGES = {2127--2134}, }
Endnote
%0 Journal Article %A Yang, Baoguang %A Dong, Zhao %A Feng, Jieqing %A Seidel, Hans-Peter %A Kautz, Jan %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Variance Soft Shadow Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1793-7 %F EDOC: 537317 %R 10.1111/j.1467-8659.2010.01800.x %7 2010 %D 2010 %* Review method: peer-reviewed %X We present variance soft shadow mapping (VSSM) for rendering plausible soft shadow in real-time. VSSM is based on the theoretical framework of percentage-closer soft shadows (PCSS) and exploits recent advances in variance shadow mapping (VSM). Our new formulation allows for the efficient computation of (average) blocker distances, a common bottleneck in PCSS-based methods. Furthermore, we avoid incorrectly lit pixels commonly encountered in VSM-based methods by appropriately subdividing the filter kernel. We demonstrate that VSSM renders highquality soft shadows efficiently (usually over 100 fps) for complex scene settings. Its speed is at least one order of magnitude faster than PCSS for large penumbra. %J Computer Graphics Forum %V 29 %N 7 %& 2127 %P 2127 - 2134 %I Blackwell %C Oxford, UK %@ false
Wang, O., Fuchs, M., Fuchs, C., Davis, J., Seidel, H.-P., and Lensch, H.P.A. 2010. A Context-aware Light Source. IEEE International Conference on Computational Photography (ICCP 2010), IEEE.
Abstract
We present a technique that combines the visual benefits of virtual enhancement with the intuitive interaction of the real world. We accomplish this by introducing the concept of a context-aware light source. This light source provides illumination based on scene context in real-time. This allows us to project feature enhancement in-place onto an object while it is being manipulated by the user. A separate proxy light source can be employed to enable freely programmable shading responses for interactive scene analysis. We created a prototype hardware setup and have implemented several applications that demonstrate the approach, such as a sharpening light, an edge highlighting light, an accumulation light, and a light with a programmable, nonlinear shading response.
Export
BibTeX
@inproceedings{WangICCP2010, TITLE = {A Context-aware Light Source}, AUTHOR = {Wang, Oliver and Fuchs, Martin and Fuchs, Christian and Davis, James and Seidel, Hans-Peter and Lensch, Hendrik P. A.}, LANGUAGE = {eng}, ISBN = {978-1-4244-7022-8}, URL = {http://graphics.soe.ucsc.edu/publications/data/wango-context.pdf}, DOI = {10.1109/ICCPHOT.2010.5585091}, PUBLISHER = {IEEE}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {We present a technique that combines the visual benefits of virtual enhancement with the intuitive interaction of the real world. We accomplish this by introducing the concept of a context-aware light source. This light source provides illumination based on scene context in real-time. This allows us to project feature enhancement in-place onto an object while it is being manipulated by the user. A separate proxy light source can be employed to enable freely programmable shading responses for interactive scene analysis. We created a prototype hardware setup and have implemented several applications that demonstrate the approach, such as a sharpening light, an edge highlighting light, an accumulation light, and a light with a programmable, nonlinear shading response.}, BOOKTITLE = {IEEE International Conference on Computational Photography (ICCP 2010)}, PAGES = {1--8}, ADDRESS = {Cambridge, MA, USA}, }
Endnote
%0 Conference Proceedings %A Wang, Oliver %A Fuchs, Martin %A Fuchs, Christian %A Davis, James %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Context-aware Light Source : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1726-0 %F EDOC: 537298 %R 10.1109/ICCPHOT.2010.5585091 %U http://graphics.soe.ucsc.edu/publications/data/wango-context.pdf %D 2010 %B International Conference on Computational Photography %Z date of event: 2010-03-28 - 2010-03-30 %C Cambridge, MA, USA %X We present a technique that combines the visual benefits of virtual enhancement with the intuitive interaction of the real world. We accomplish this by introducing the concept of a context-aware light source. This light source provides illumination based on scene context in real-time. This allows us to project feature enhancement in-place onto an object while it is being manipulated by the user. A separate proxy light source can be employed to enable freely programmable shading responses for interactive scene analysis. We created a prototype hardware setup and have implemented several applications that demonstrate the approach, such as a sharpening light, an edge highlighting light, an accumulation light, and a light with a programmable, nonlinear shading response. %B IEEE International Conference on Computational Photography %P 1 - 8 %I IEEE %@ 978-1-4244-7022-8
Thormählen, T., Hasler, N., Wand, M., and Seidel, H.-P. 2010. Registration of Sub-Sequence and Multi-Camera Reconstructions for Camera Motion Estimation. Journal of Virtual Reality and Broadcasting7, 2.
Export
BibTeX
@article{Thormahlen2010jvrb, TITLE = {Registration of Sub-Sequence and Multi-Camera Reconstructions for Camera Motion Estimation}, AUTHOR = {Thorm{\"a}hlen, Thorsten and Hasler, Nils and Wand, Michael and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1860-2037}, DOI = {10.20385/1860-2037/7.2010.2}, PUBLISHER = {HBZ}, ADDRESS = {K{\"o}ln}, YEAR = {2010}, DATE = {2010}, JOURNAL = {Journal of Virtual Reality and Broadcasting}, VOLUME = {7}, NUMBER = {2}, PAGES = {1--10}, }
Endnote
%0 Journal Article %A Thorm&#228;hlen, Thorsten %A Hasler, Nils %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Registration of Sub-Sequence and Multi-Camera Reconstructions for Camera Motion Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1785-9 %F EDOC: 537301 %R 10.20385/1860-2037/7.2010.2 %D 2010 %J Journal of Virtual Reality and Broadcasting %V 7 %N 2 %& 1 %P 1 - 10 %I HBZ %C K&#246;ln %@ false
Theobalt, C., de Aguiar, E., Stoll, C., Seidel, H.-P., and Thrun, S. 2010. Performance Capture from Multi-view Video. In: Image and Geometry Processing for 3D-Cinematography. Springer, Berlin.
Export
BibTeX
@incollection{TheobaltAguiar2010, TITLE = {Performance Capture from Multi-view Video}, AUTHOR = {Theobalt, Christian and de Aguiar, Edilson and Stoll, Carsten and Seidel, Hans-Peter and Thrun, Sebastian}, LANGUAGE = {eng}, ISBN = {978-3-642-12391-7}, DOI = {10.1007/978-3-642-12392-4_6}, PUBLISHER = {Springer}, ADDRESS = {Berlin}, YEAR = {2010}, DATE = {2010}, BOOKTITLE = {Image and Geometry Processing for 3D-Cinematography}, EDITOR = {Ronfard, R{\'e}mi and Taubin, Gabriel}, PAGES = {127--149}, SERIES = {Geometry and Computing}, VOLUME = {5}, }
Endnote
%0 Book Section %A Theobalt, Christian %A de Aguiar, Edilson %A Stoll, Carsten %A Seidel, Hans-Peter %A Thrun, Sebastian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Performance Capture from Multi-view Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-177A-3 %F EDOC: 537270 %R 10.1007/978-3-642-12392-4_6 %D 2010 %B Image and Geometry Processing for 3D-Cinematography %E Ronfard, R&#233;mi; Taubin, Gabriel %P 127 - 149 %I Springer %C Berlin %@ 978-3-642-12391-7 %S Geometry and Computing %N 5 %U https://rdcu.be/dJwOe
Tevs, A., Wand, M., Ihrke, I., and Seidel, H.-P. 2010. A Bayesian Approach to Manifold Topology Reconstruction. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this paper, we investigate the problem of statistical reconstruction of piecewise linear manifold topology. Given a noisy, probably undersampled point cloud from a one- or two-manifold, the algorithm reconstructs an approximated most likely mesh in a Bayesian sense from which the sample might have been taken. We incorporate statistical priors on the object geometry to improve the reconstruction quality if additional knowledge about the class of original shapes is available. The priors can be formulated analytically or learned from example geometry with known manifold tessellation. The statistical objective function is approximated by a linear programming / integer programming problem, for which a globally optimal solution is found. We apply the algorithm to a set of 2D and 3D reconstruction examples, demonstrating that a statistics-based manifold reconstruction is feasible, and still yields plausible results in situations where sampling conditions are violated.
Export
BibTeX
@techreport{TevsTechReport2009,
  title       = {A Bayesian Approach to Manifold Topology Reconstruction},
  author      = {Tevs, Art and Wand, Michael and Ihrke, Ivo and Seidel, Hans-Peter},
  language    = {eng},
  issn        = {0946-011X},
  number      = {MPI-I-2009-4-002},
  institution = {Max-Planck-Institut f{\"u}r Informatik},
  address     = {Saarbr{\"u}cken},
  year        = {2010},
  date        = {2010},
  abstract    = {In this paper, we investigate the problem of statistical reconstruction of piecewise linear manifold topology. Given a noisy, probably undersampled point cloud from a one- or two-manifold, the algorithm reconstructs an approximated most likely mesh in a Bayesian sense from which the sample might have been taken. We incorporate statistical priors on the object geometry to improve the reconstruction quality if additional knowledge about the class of original shapes is available. The priors can be formulated analytically or learned from example geometry with known manifold tessellation. The statistical objective function is approximated by a linear programming / integer programming problem, for which a globally optimal solution is found. We apply the algorithm to a set of 2D and 3D reconstruction examples, demonstrating that a statistics-based manifold reconstruction is feasible, and still yields plausible results in situations where sampling conditions are violated.},
  type        = {Research Report},
}
Endnote
%0 Report %A Tevs, Art %A Wand, Michael %A Ihrke, Ivo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Bayesian Approach to Manifold Topology Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1722-7 %F EDOC: 537282 %@ 0946-011X %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2010 %P 23 p. %X In this paper, we investigate the problem of statistical reconstruction of piecewise linear manifold topology. Given a noisy, probably undersampled point cloud from a one- or two-manifold, the algorithm reconstructs an approximated most likely mesh in a Bayesian sense from which the sample might have been taken. We incorporate statistical priors on the object geometry to improve the reconstruction quality if additional knowledge about the class of original shapes is available. The priors can be formulated analytically or learned from example geometry with known manifold tessellation. The statistical objective function is approximated by a linear programming / integer programming problem, for which a globally optimal solution is found. We apply the algorithm to a set of 2D and 3D reconstruction examples, demon-strating that a statistics-based manifold reconstruction is feasible, and still yields plausible results in situations where sampling conditions are violated. %B Research Report
Strzodka, R., Shaheen, M., Pajak, D., and Seidel, H.-P. 2010. Cache Oblivious Parallelograms in Iterative Stencil Computations. ICS ’10: Proceedings of the 24th ACM International Conference on Supercomputing, ACM.
Abstract
We present a new cache oblivious scheme for iterative stencil computations that performs beyond system bandwidth limitations as though gigabytes of data could reside in an enormous on-chip cache. We compare execution times for 2D and 3D spatial domains with up to 128 million double precision elements for constant and variable stencils against hand-optimized naive code and the automatic polyhedral parallelizer and locality optimizer PluTo and demonstrate the clear superiority of our results. The performance benefits stem from a tiling structure that caters for data locality, parallelism and vectorization simultaneously. Rather than tiling the iteration space from inside, we take an exterior approach with a predefined hierarchy, simple regular parallelogram tiles and a locality preserving parallelization. These advantages come at the cost of an irregular work-load distribution but a tightly integrated load-balancer ensures a high utilization of all resources.
Export
BibTeX
@inproceedings{StShPa_10CORALS,
  title     = {Cache Oblivious Parallelograms in Iterative Stencil Computations},
  author    = {Strzodka, Robert and Shaheen, Mohammed and Pajak, Dawid and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4503-0018-6},
  doi       = {10.1145/1810085.1810096},
  publisher = {ACM},
  year      = {2010},
  date      = {2010},
  abstract  = {We present a new cache oblivious scheme for iterative stencil computations that performs beyond system bandwidth limitations as though gigabytes of data could reside in an enormous on-chip cache. We compare execution times for 2D and 3D spatial domains with up to 128 million double precision elements for constant and variable stencils against hand-optimized naive code and the automatic polyhedral parallelizer and locality optimizer PluTo and demonstrate the clear superiority of our results. The performance benefits stem from a tiling structure that caters for data locality, parallelism and vectorization simultaneously. Rather than tiling the iteration space from inside, we take an exterior approach with a predefined hierarchy, simple regular parallelogram tiles and a locality preserving parallelization. These advantages come at the cost of an irregular work-load distribution but a tightly integrated load-balancer ensures a high utilization of all resources.},
  booktitle = {ICS '10: Proceedings of the 24th ACM International Conference on Supercomputing},
  pages     = {49--59},
  address   = {Tsukuba, Ibaraki, Japan},
}
Endnote
%0 Conference Proceedings %A Strzodka, Robert %A Shaheen, Mohammed %A Pajak, Dawid %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Cache Oblivious Parallelograms in Iterative Stencil Computations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1742-0 %F EDOC: 537274 %R 10.1145/1810085.1810096 %D 2010 %B 24th ACM International Conference on Supercomputing %Z date of event: 2010-06-02 - 2010-06-04 %C Tsukuba, Ibaraki, Japan %X We present a new cache oblivious scheme for iterative stencil computations that <br>performs beyond system bandwidth limitations as though gigabytes of data could <br>reside in an enormous on-chip cache. We compare execution times for 2D and 3D <br>spatial domains with up to 128 million double precision elements for constant <br>and variable stencils against hand-optimized naive code and the automatic <br>polyhedral parallelizer and locality optimizer PluTo and demonstrate the clear <br>superiority of our results. The performance benefits stem from a tiling <br>structure that caters for data locality, parallelism and vectorization <br>simultaneously. Rather than tiling the iteration space from inside, we take an <br>exterior approach with a predefined hierarchy, simple regular parallelogram <br>tiles and a locality preserving parallelization. These advantages come at the <br>cost of an irregular work-load distribution but a tightly integrated <br>load-balancer ensures a high utilization of all resources. %B ICS '10: Proceedings of the 24th ACM International Conference on Supercomputing %P 49 - 59 %I ACM %@ 978-1-4503-0018-6
Schwarz, M. and Seidel, H.-P. 2010. Fast Parallel Surface and Solid Voxelization on GPUs. ACM Transactions on Graphics29, 6.
Export
BibTeX
@article{DBLP:journals/tog/SchwarzS10,
  title     = {Fast Parallel Surface and Solid Voxelization on {GPUs}},
  author    = {Schwarz, Michael and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1882261.1866201},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  journal   = {ACM Transactions on Graphics},
  volume    = {29},
  number    = {6},
  pages     = {179:1--179:9},
}
Endnote
%0 Journal Article %A Schwarz, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Parallel Surface and Solid Voxelization on GPUs : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B1D-C %R 10.1145/1882261.1866201 %D 2010 %J ACM Transactions on Graphics %V 29 %N 6 %& 179:1 %P 179:1 - 179:9 %I Association for Computing Machinery %C New York, NY %@ false
Schultz, T., Theisel, H., and Seidel, H.-P. 2010. Crease Surfaces: From Theory to Extraction and Application to Diffusion Tensor MRI. IEEE Transactions on Visualization and Computer Graphics16, 1.
Abstract
Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal) have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not, in general, orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that diffusion tensor magnetic resonance imaging (DT-MRI) stream surfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute.
Export
BibTeX
@article{Schultz:TVCG10,
  title     = {Crease Surfaces: {F}rom Theory to Extraction and Application to Diffusion Tensor {MRI}},
  author    = {Schultz, Thomas and Theisel, Holger and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2009.44},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  abstract  = {Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal) have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not, in general, orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that diffusion tensor magnetic resonance imaging (DT-MRI) stream surfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute.},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {16},
  number    = {1},
  pages     = {109--119},
}
Endnote
%0 Journal Article %A Schultz, Thomas %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Crease Surfaces: From Theory to Extraction and Application to Diffusion Tensor MRI : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-174A-F %F EDOC: 537280 %R 10.1109/TVCG.2009.44 %D 2010 %* Review method: peer-reviewed %X Crease surfaces are two-dimensional manifolds along which a scalar field <br>assumes a local maximum (ridge) or a local minimum (valley) in a constrained <br>space. Unlike isosurfaces, they are able to capture extremal structures in the <br>data. Creases have a long tradition in image processing and computer vision, <br>and have recently become a popular tool for visualization. When extracting <br>crease surfaces, degeneracies of the Hessian (i.e., lines along which two <br>eigenvalues are equal) have so far been ignored. We show that these loci, <br>however, have two important consequences for the topology of crease surfaces: <br>First, creases are bounded not only by a side constraint on eigenvalue sign, <br>but also by Hessian degeneracies. Second, crease surfaces are not, in general, <br>orientable. We describe an efficient algorithm for the extraction of crease <br>surfaces which takes these insights into account and demonstrate that it <br>produces more accurate results than previous approaches. Finally, we show that <br>diffusion tensor magnetic resonance imaging (DT-MRI) stream surfaces, which <br>were previously used for the analysis of planar regions in diffusion tensor MRI <br>data, are mathematically ill-defined. As an example application of our method, <br>creases in a measure of planarity are presented as a viable substitute. %J IEEE Transactions on Visualization and Computer Graphics %V 16 %N 1 %& 109 %P 109 - 119 %I IEEE Computer Society %C New York, NY %@ false
Ritschel, T., Thormählen, T., Dachsbacher, C., Kautz, J., and Seidel, H.-P. 2010. Interactive On-Surface Signal Deformation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010)29, 4.
Export
BibTeX
@article{Ritschel2010SigDeform,
  title     = {Interactive On-Surface Signal Deformation},
  author    = {Ritschel, Tobias and Thorm{\"a}hlen, Thorsten and Dachsbacher, Carsten and Kautz, Jan and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1778765.1778773},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {29},
  number    = {4},
  pages     = {1--8},
  eid       = {36},
  booktitle = {Proceedings of ACM SIGGRAPH 2010},
}
Endnote
%0 Journal Article %A Ritschel, Tobias %A Thorm&#228;hlen, Thorsten %A Dachsbacher, Carsten %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive On-Surface Signal Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1759-D %F EDOC: 537302 %R 10.1145/1778765.1778773 %7 2010 %D 2010 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 29 %N 4 %& 1 %P 1 - 8 %Z sequence number: 36 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA, [25-29 July 2010]
Pons-Moll, G., Baak, A., Helten, T., Müller, M., Seidel, H.-P., and Rosenhahn, B. 2010. Multisensor-Fusion for 3D Full-Body Human Motion Capture. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Export
BibTeX
@inproceedings{PonsBaHeMuSeRo10_MultisensorFusion_CVPR,
  title     = {Multisensor-Fusion for {3D} Full-Body Human Motion Capture},
  author    = {Pons-Moll, Gerard and Baak, Andreas and Helten, Thomas and M{\"u}ller, Meinard and Seidel, Hans-Peter and Rosenhahn, Bodo},
  language  = {eng},
  isbn      = {978-1-4244-6984-0},
  doi       = {10.1109/CVPR.2010.5540153},
  publisher = {IEEE},
  year      = {2010},
  date      = {2010},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)},
  pages     = {663--670},
  address   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Pons-Moll, Gerard %A Baak, Andreas %A Helten, Thomas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Rosenhahn, Bodo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Multisensor-Fusion for 3D Full-Body Human Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-176E-0 %F EDOC: 537294 %R 10.1109/CVPR.2010.5540153 %U http://dx.doi.org/10.1109/CVPR.2010.5540153 %D 2010 %B IEEE Conference on Computer Vision on Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 663 - 670 %I IEEE %@ 978-1-424-46984-0
Pajak, D., Čadík, M., Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2010a. Visual Maladaptation in Contrast Domain. Human Vision and Electronic Imaging XV (HVEI 2010), SPIE.
Export
BibTeX
@inproceedings{Pajak2010,
  title     = {Visual Maladaptation in Contrast Domain},
  author    = {Pajak, Dawid and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {9780819479204},
  doi       = {10.1117/12.844934},
  publisher = {SPIE},
  year      = {2010},
  date      = {2010},
  booktitle = {Human Vision and Electronic Imaging XV (HVEI 2010)},
  editor    = {Rogowitz, Bernice and Pappas, Thrasyvoulos N.},
  pages     = {1--12},
  eid       = {752710},
  series    = {Proceedings of SPIE},
  volume    = {7527},
  address   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Pajak, Dawid %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visual Maladaptation in Contrast Domain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-179C-6 %F EDOC: 537311 %R 10.1117/12.844934 %D 2010 %B Human Vision and Electronic Imaging XV %Z date of event: 2010-01-18 - 2010-01-21 %C San Jose, CA, USA %B Human Vision and Electronic Imaging XV %E Rogowitz, Bernice; Pappas, Thrasyvoulous N. %P 1 - 12 %Z sequence number: 752710 %I SPIE %@ 9780819479204 %B Proceedings of SPIE %N 7527
Pajak, D., Čadík, M., Aydin, T.O., Okabe, M., Myszkowski, K., and Seidel, H.-P. 2010b. Contrast Prescription for Multiscale Image Editing. The Visual Computer26, 6.
Export
BibTeX
@article{Cadik2010,
  title     = {Contrast Prescription for Multiscale Image Editing},
  author    = {Pajak, Dawid and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Okabe, Makoto and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0178-2789},
  doi       = {10.1007/s00371-010-0485-3},
  publisher = {Springer International},
  address   = {Berlin},
  year      = {2010},
  date      = {2010},
  journal   = {The Visual Computer},
  volume    = {26},
  number    = {6},
  pages     = {739--748},
}
Endnote
%0 Journal Article %A Pajak, Dawid %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Okabe, Makoto %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Contrast Prescription for Multiscale Image Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1748-4 %F EDOC: 537310 %R 10.1007/s00371-010-0485-3 %7 2010 %D 2010 %* Review method: peer-reviewed %J The Visual Computer %V 26 %N 6 %& 739 %P 739 - 748 %I Springer International %C Berlin %@ false %U https://rdcu.be/dJw1N
Lee, S., Eisemann, E., and Seidel, H.-P. 2010. Real-time Lens Blur Effects and Focus Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010)29, 4.
Abstract
We present a novel rendering system for defocus-blur and lens effects. It supports physically-based rendering and outperforms previous approaches by involving a novel GPU-based tracing method. Our solution achieves more precision than competing real-time solutions and our results are mostly indistinguishable from offline rendering. Our method is also more general and can integrate advanced simulations, such as simple geometric lens models enabling various lens aberration effects. These latter are crucial for realism, but are often employed in artistic contexts too. We show that available artistic lenses can be simulated by our method. In this spirit, our work introduces an intuitive control over depth-of-field effects. The physical basis is crucial as a starting point to enable new artistic renderings based on a generalized focal surface to emphasize particular elements in the scene while retaining a realistic look. Our real-time solution provides realistic, as well as plausible expressive results.
Export
BibTeX
@article{Lee2010lensblur,
  title     = {Real-time Lens Blur Effects and Focus Control},
  author    = {Lee, Sungkil and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1778765.1778802},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  abstract  = {We present a novel rendering system for defocus-blur and lens effects. It supports physically-based rendering and outperforms previous approaches by involving a novel GPU-based tracing method. Our solution achieves more precision than competing real-time solutions and our results are mostly indistinguishable from offline rendering. Our method is also more general and can integrate advanced simulations, such as simple geometric lens models enabling various lens aberration effects. These latter are crucial for realism, but are often employed in artistic contexts too. We show that available artistic lenses can be simulated by our method. In this spirit, our work introduces an intuitive control over depth-of-field effects. The physical basis is crucial as a starting point to enable new artistic renderings based on a generalized focal surface to emphasize particular elements in the scene while retaining a realistic look. Our real-time solution provides realistic, as well as plausible expressive results.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {29},
  number    = {4},
  pages     = {1--7},
  eid       = {65},
  booktitle = {Proceedings of ACM SIGGRAPH 2010},
}
Endnote
%0 Journal Article %A Lee, Sungkil %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Lens Blur Effects and Focus Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1781-2 %F EDOC: 537318 %R 10.1145/1778765.1778802 %7 2010 %D 2010 %X We present a novel rendering system for defocus-blur and lens effects. It <br>supports physically-based rendering and outperforms previous approaches by <br>involving a novel GPU-based tracing method. Our solution achieves more <br>precision than competing real-time solutions and our results are mostly <br>indistinguishable from offline rendering. Our method is also more general and <br>can integrate advanced simulations, such as simple geometric lens models <br>enabling various lens aberration effects. These latter are crucial for realism, <br>but are often employed in artistic contexts too. We show that available <br>artistic lenses can be simulated by our method. In this spirit, our work <br>introduces an intuitive control over depth-of-field effects. The physical basis <br>is crucial as a starting point to enable new artistic renderings based on a <br>generalized focal surface to emphasize particular elements in the scene while <br>retaining a realistic look. Our real-time solution provides realistic, as well <br>as plausible expressive results. %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 7 %Z sequence number: 65 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010]
Kurz, C., Thormählen, T., Seidel, H.-P., Ritschel, T., and Eisemann, E. 2010. Camera Motion Style Transfer. Conference on Visual Media Production (CVMP 2010), IEEE Computer Society.
Export
BibTeX
@inproceedings{Kurz2010cvmp,
  title     = {Camera Motion Style Transfer},
  author    = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter and Ritschel, Tobias and Eisemann, Elmar},
  language  = {eng},
  isbn      = {978-1-4244-8872-8},
  doi       = {10.1109/CVMP.2010.9},
  localid   = {Local-ID: C125675300671F7B-ABA8A88A84C6FB60C12577A1004736D6-Kurz2010cvmp},
  publisher = {IEEE Computer Society},
  year      = {2010},
  date      = {2010},
  booktitle = {Conference on Visual Media Production (CVMP 2010)},
  pages     = {9--16},
  address   = {London, UK},
}
Endnote
%0 Conference Proceedings %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %A Ritschel, Tobias %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Camera Motion Style Transfer : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1744-C %F EDOC: 537304 %R 10.1109/CVMP.2010.9 %F OTHER: Local-ID: C125675300671F7B-ABA8A88A84C6FB60C12577A1004736D6-Kurz2010cvmp %D 2010 %B Conference on Visual Media Production %Z date of event: 2010-11-17 - 2010-11-18 %C London, UK %B Conference on Visual Media Production %P 9 - 16 %I IEEE Computer Society %@ 978-1-4244-8872-8
Kosov, S., Thormählen, T., and Seidel, H.-P. 2010. Rapid Stereo-vision Enhanced Face Recognition. Proceedings of the 2010 IEEE International Conference on Image Processing (ICIP 2010), IEEE.
Export
BibTeX
@inproceedings{Kosov-et-al_ICIP10,
  title     = {Rapid Stereo-vision Enhanced Face Recognition},
  author    = {Kosov, Sergey and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4244-7992-4},
  doi       = {10.1109/ICIP.2010.5652010},
  localid   = {Local-ID: C125675300671F7B-E01CE6CCFB4BA3C3C12577A100369981-Kosov2010recog},
  publisher = {IEEE},
  year      = {2010},
  date      = {2010},
  booktitle = {Proceedings of the 2010 IEEE International Conference on Image Processing (ICIP 2010)},
  pages     = {2437--2440},
  address   = {Hong Kong, China},
}
Endnote
%0 Conference Proceedings %A Kosov, Sergey %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Rapid Stereo-vision Enhanced Face Recognition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-177C-0 %F EDOC: 537303 %R 10.1109/ICIP.2010.5652010 %F OTHER: Local-ID: C125675300671F7B-E01CE6CCFB4BA3C3C12577A100369981-Kosov2010recog %D 2010 %B 2010 IEEE International Conference on Image Processing %Z date of event: 2010-09-26 - 2010-09-29 %C Hong Kong, China %B Proceedings of the 2010 IEEE International Conference on Image Processing %P 2437 - 2440 %I IEEE %@ 978-1-4244-7992-4
Kerber, J., Tevs, A., Zayer, R., Belyaev, A., and Seidel, H.-P. 2010a. Real-time Generation of Digital Bas-Reliefs. Computer-Aided Design and Applications7, 4.
Abstract
Bas-relief is a form of sculpture where carved or chiseled forms protrude partially and shallowly from the background. Occupying an intermediate place between painting and full 3D sculpture, bas-relief sculpture exploits properties of human visual perception in order to maintain perceptually salient 3D information. In this paper, we present two methods for automatic bas-relief generation from 3D digital shapes. Both methods are inspired by techniques developed for high dynamic range image compression and have the bilateral filter as the main ingredient. We demonstrate that the methods are capable of preserving fine shape features and achieving good compression without compromising the quality of surface details. For artists, bas-relief generation starts from managing the viewer's point of view and compositing the scene. Therefore we strive in our work to streamline this process by focusing on easy and intuitive user interaction which is paramount to artistic applications. Our algorithms allow for real time computation thanks to our implementation on graphics hardware. Besides interactive production of stills, this work offers the possibility for generating bas-relief animations. Last but not least, we explore the generation of artistic reliefs that mimic cubism in painting.
Export
BibTeX
@article{Kerber2010_1,
  TITLE     = {Real-time Generation of Digital Bas-Reliefs},
  AUTHOR    = {Kerber, Jens and Tevs, Art and Zayer, Rhaleb and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1686-4360},
  DOI       = {10.3722/cadaps.2010.465-478},
  LOCALID   = {Local-ID: C125675300671F7B-0DCEE3E97CF972D1C12577F400453F06-Kerber2010_1},
  PUBLISHER = {Taylor \& Francis},
  ADDRESS   = {London},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Bas-relief is a form of sculpture where carved or chiseled forms protrude partially and shallowly from the background. Occupying an intermediate place between painting and full 3D sculpture, bas-relief sculpture exploits properties of human visual perception in order to maintain perceptually salient 3D information. In this paper, we present two methods for automatic bas-relief generation from 3D digital shapes. Both methods are inspired by techniques developed for high dynamic range image compression and have the bilateral filter as the main ingredient. We demonstrate that the methods are capable of preserving fine shape features and achieving good compression without compromising the quality of surface details. For artists, bas-relief generation starts from managing the viewer's point of view and compositing the scene. Therefore we strive in our work to streamline this process by focusing on easy and intuitive user interaction which is paramount to artistic applications. Our algorithms allow for real time computation thanks to our implementation on graphics hardware. Besides interactive production of stills, this work offers the possibility for generating bas-relief animations. Last but not least, we explore the generation of artistic reliefs that mimic cubism in painting.},
  JOURNAL   = {Computer-Aided Design and Applications},
  VOLUME    = {7},
  NUMBER    = {4},
  PAGES     = {465--478},
}
Endnote
%0 Journal Article %A Kerber, Jens %A Tevs, Art %A Zayer, Rhaleb %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Generation of Digital Bas-Reliefs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-177F-A %F EDOC: 537313 %R 10.3722/cadaps.2010.465-478 %F OTHER: Local-ID: C125675300671F7B-0DCEE3E97CF972D1C12577F400453F06-Kerber2010_1 %7 2010 %D 2010 %* Review method: peer-reviewed %X Bas-relief is a form of sculpture where carved or chiseled forms protrude partially and shallowly from the background. Occupying an intermediate place between painting and full 3D sculpture, bas-relief sculpture exploits properties of human visual perception in order to maintain perceptually salient 3D information. In this paper, we present two methods for automatic bas-relief generation from 3D digital shapes. Both methods are inspired by techniques developed for high dynamic range image compression and have the bilateral filter as the main ingredient. We demonstrate that the methods are capable of preserving fine shape features and achieving good compression without compromising the quality of surface details. For artists, bas-relief generation starts from managing the viewer's point of view and compositing the scene. Therefore we strive in our work to streamline this process by focusing on easy and intuitive user interaction which is paramount to artistic applications. Our algorithms allow for real time computation thanks to our implementation on graphics hardware. Besides interactive production of stills, this work offers the possibility for generating bas-relief animations. Last but not least, we explore the generation of artistic reliefs that mimic cubism in painting. 
%J Computer-Aided Design and Applications %V 7 %N 4 %& 465 %P 465 - 478 %I Taylor & Francis %C London %@ false
Kerber, J., Bokeloh, M., Wand, M., Krüger, J., and Seidel, H.-P. 2010b. Feature Preserving Sketching of Volume Data. Vision, Modeling & Visualization (VMV 2010), Eurographics Association.
Abstract
In this paper, we present a novel method for extracting feature lines from volume data sets. This leads to a reduction of visual complexity and provides an abstraction of the original data to important structural features. We employ a new iteratively reweighted least-squares approach that allows us to detect sharp creases and to preserve important features such as corners or intersection of feature lines accurately. Traditional least-squares methods This is important for both visual quality as well as reliable further processing in feature detection algorithms. Our algorithm is efficient and easy to implement, and nevertheless effective and robust to noise. We show results for a number of different data sets.
Export
BibTeX
@inproceedings{Kerber2010_2,
  TITLE     = {Feature Preserving Sketching of Volume Data},
  AUTHOR    = {Kerber, Jens and Bokeloh, Martin and Wand, Michael and Kr{\"u}ger, Jens and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-79-1},
  DOI       = {10.2312/PE/VMV/VMV10/195-202},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {In this paper, we present a novel method for extracting feature lines from volume data sets. This leads to a reduction of visual complexity and provides an abstraction of the original data to important structural features. We employ a new iteratively reweighted least-squares approach that allows us to detect sharp creases and to preserve important features such as corners or intersection of feature lines accurately. Traditional least-squares methods This is important for both visual quality as well as reliable further processing in feature detection algorithms. Our algorithm is efficient and easy to implement, and nevertheless effective and robust to noise. We show results for a number of different data sets.},
  BOOKTITLE = {Vision, Modeling \& Visualization (VMV 2010)},
  EDITOR    = {Koch, Reinhard and Kolb, Andreas and Rezk-Salama, Christof},
  PAGES     = {195--202},
  ADDRESS   = {Siegen, Germany},
}
Endnote
%0 Conference Proceedings %A Kerber, Jens %A Bokeloh, Martin %A Wand, Michael %A Kr&#252;ger, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Preserving Sketching of Volume Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1752-C %F EDOC: 537316 %R 10.2312/PE/VMV/VMV10/195-202 %D 2010 %B 15th International Workshop on Vision, Modeling and Visualization %Z date of event: 2010-11-15 - 2010-11-17 %C Siegen, Germany %X In this paper, we present a novel method for extracting feature lines from volume data sets. This leads to a reduction of visual complexity and provides an abstraction of the original data to important structural features. We employ a new iteratively reweighted least-squares approach that allows us to detect sharp creases and to preserve important features such as corners or intersection of feature lines accurately. Traditional least-squares methods This is important for both visual quality as well as reliable further processing in feature detection algorithms. Our algorithm is efficient and easy to implement, and nevertheless effective and robust to noise. We show results for a number of different data sets. %B Vision, Modeling & Visualization %E Koch, Reinhard; Kolb, Andreas; Rezk-Salama, Christof %P 195 - 202 %I Eurographics Association %@ 978-3-905673-79-1
Jain, A., Kurz, C., Thormählen, T., and Seidel, H.-P. 2010a. Exploiting Global Connectivity Constraints for Reconstruction of 3D Line Segment from Images. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Export
BibTeX
@inproceedings{Jain2010Lines,
  TITLE     = {Exploiting Global Connectivity Constraints for Reconstruction of {3D} Line Segment from Images},
  AUTHOR    = {Jain, Arjun and Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-6984-0},
  DOI       = {10.1109/CVPR.2010.5539781},
  PUBLISHER = {IEEE},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)},
  PAGES     = {1586--1593},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Jain, Arjun %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Global Connectivity Constraints for Reconstruction of 3D Line Segment from Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-174F-5 %F EDOC: 537268 %R 10.1109/CVPR.2010.5539781 %D 2010 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 1586 - 1593 %I IEEE %@ 978-1-424-46984-0
Jain, A., Thormählen, T., Seidel, H.-P., and Theobalt, C. 2010b. MovieReshape: Tracking and Reshaping of Humans in Videos. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2010) 29, 6.
Export
BibTeX
@article{Jain2010MovieReshape,
  TITLE     = {{MovieReshape}: Tracking and Reshaping of Humans in Videos},
  AUTHOR    = {Jain, Arjun and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1866158.1866174},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {29},
  NUMBER    = {6},
  PAGES     = {1--10},
  EID       = {148},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2010},
  EDITOR    = {Drettakis, George},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MovieReshape: Tracking and Reshaping of Humans in Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1769-9 %F EDOC: 537305 %R 10.1145/1866158.1866174 %7 2010 %D 2010 %J ACM Transactions on Graphics %V 29 %N 6 %& 1 %P 1 - 10 %Z sequence number: 148 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2010 %O ACM SIGGRAPH Asia 2010 Seoul, South Korea ; [December 15 - 18, 2010] SA'10
Hu, W., Dong, Z., Ihrke, I., Grosch, T., Yuan, G., and Seidel, H.-P. 2010. Interactive Volume Caustics in Single-scattering Media. Proceedings I3D 2010, ACM.
Abstract
Volume caustics are intricate illumination patterns formed by light first <br>interacting with a specular surface and subsequently being scattered inside a <br>participating medium. Although this phenomenon can be simulated by existing <br>techniques, image synthesis is usually non-trivial and time-consuming.<br><br>Motivated by interactive applications, we propose a novel volume caustics <br>rendering method for single-scattering participating media. Our method is based <br>on the observation that line rendering of illumination rays into the screen <br>buffer establishes a direct light path between the viewer and the light source. <br>This connection is introduced via a single scattering event for every pixel <br>affected by the line primitive. Since the GPU is a parallel processor, the <br>radiance contributions of these light paths to each of the pixels can be <br>computed and accumulated independently. The implementation of our method is <br>straightforward and we show that it can be seamlessly integrated with existing <br>methods for rendering participating media.<br><br>We achieve high-quality results at real-time frame rates for large and dynamic <br>scenes containing homogeneous participating media. For inhomogeneous media, our <br>method achieves interactive performance that is close to real-time. Our method <br>is based on a simplified physical model and can thus be used for generating <br>physically plausible previews of expensive lighting simulations quickly.
Export
BibTeX
@inproceedings{HDI:2010:VolumeCaustics,
  TITLE     = {Interactive Volume Caustics in Single-scattering Media},
  AUTHOR    = {Hu, Wei and Dong, Zhao and Ihrke, Ivo and Grosch, Thorsten and Yuan, Guodong and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-60558-939-8},
  DOI       = {10.1145/1730804.1730822},
  PUBLISHER = {ACM},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Volume caustics are intricate illumination patterns formed by light first interacting with a specular surface and subsequently being scattered inside a participating medium. Although this phenomenon can be simulated by existing techniques, image synthesis is usually non-trivial and time-consuming. Motivated by interactive applications, we propose a novel volume caustics rendering method for single-scattering participating media. Our method is based on the observation that line rendering of illumination rays into the screen buffer establishes a direct light path between the viewer and the light source. This connection is introduced via a single scattering event for every pixel affected by the line primitive. Since the GPU is a parallel processor, the radiance contributions of these light paths to each of the pixels can be computed and accumulated independently. The implementation of our method is straightforward and we show that it can be seamlessly integrated with existing methods for rendering participating media. We achieve high-quality results at real-time frame rates for large and dynamic scenes containing homogeneous participating media. For inhomogeneous media, our method achieves interactive performance that is close to real-time. Our method is based on a simplified physical model and can thus be used for generating physically plausible previews of expensive lighting simulations quickly.},
  BOOKTITLE = {Proceedings I3D 2010},
  EDITOR    = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.},
  PAGES     = {109--117},
  ADDRESS   = {Washington, DC, US},
}
Endnote
%0 Conference Proceedings %A Hu, Wei %A Dong, Zhao %A Ihrke, Ivo %A Grosch, Thorsten %A Yuan, Guodong %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Volume Caustics in Single-scattering Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-175B-9 %F EDOC: 537283 %R 10.1145/1730804.1730822 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington, DC, US %X Volume caustics are intricate illumination patterns formed by light first <br>interacting with a specular surface and subsequently being scattered inside a <br>participating medium. Although this phenomenon can be simulated by existing <br>techniques, image synthesis is usually non-trivial and time-consuming.<br><br>Motivated by interactive applications, we propose a novel volume caustics <br>rendering method for single-scattering participating media. Our method is based <br>on the observation that line rendering of illumination rays into the screen <br>buffer establishes a direct light path between the viewer and the light source. <br>This connection is introduced via a single scattering event for every pixel <br>affected by the line primitive. Since the GPU is a parallel processor, the <br>radiance contributions of these light paths to each of the pixels can be <br>computed and accumulated independently. The implementation of our method is <br>straightforward and we show that it can be seamlessly integrated with existing <br>methods for rendering participating media.<br><br>We achieve high-quality results at real-time frame rates for large and dynamic <br>scenes containing homogeneous participating media. For inhomogeneous media, our <br>method achieves interactive performance that is close to real-time. 
Our method <br>is based on a simplified physical model and can thus be used for generating <br>physically plausible previews of expensive lighting simulations quickly. %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 109 - 117 %I ACM %@ 978-1-60558-939-8
Hullin, M.B., Hanika, J., Ajdin, B., Lensch, H.P.A., Kautz, J., and Seidel, H.-P. 2010. Acquisition and Analysis of Bispectral Bidirectional Reflectance and Reradiation Distribution Functions. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010) 29, 4.
Abstract
In fluorescent materials, light from a certain band of incident wavelengths is <br>reradiated at longer wavelengths, i.e., with a reduced per-photon energy. While <br>fluorescent materials are common in everyday life, they have received little <br>attention in computer graphics. Especially, no bidirectional reradiation <br>measurements of fluorescent materials have been available so far. In this <br>paper, we extend the well-known concept of the bidirectional reflectance <br>distribution function (BRDF) to account for energy transfer between <br>wavelengths, resulting in a Bispectral Bidirectional Reflectance and <br>Reradiation Distribution Function (bispectral BRRDF). Using a bidirectional and <br>bispectral measurement setup, we acquire reflectance and reradiation data of a <br>variety of fluorescent materials, including vehicle paints, paper and fabric, <br>and compare their renderings with RGB, RGBxRGB, and spectral BRDFs. Our <br>acquisition is guided by a principal component analysis on complete bispectral <br>data taken under a sparse set of angles. We show that in order to faithfully <br>reproduce the full bispectral information for all other angles, only a very <br>small number of wavelength pairs needs to be measured at a high angular <br>resolution.
Export
BibTeX
@article{Hullin2010,
  TITLE     = {Acquisition and Analysis of Bispectral Bidirectional Reflectance and Reradiation Distribution Functions},
  AUTHOR    = {Hullin, Matthias B. and Hanika, Johannes and Ajdin, Boris and Lensch, Hendrik P. A. and Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1833349.1778834},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {In fluorescent materials, light from a certain band of incident wavelengths is reradiated at longer wavelengths, i.e., with a reduced per-photon energy. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reradiation measurements of fluorescent materials have been available so far. In this paper, we extend the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths, resulting in a Bispectral Bidirectional Reflectance and Reradiation Distribution Function (bispectral BRRDF). Using a bidirectional and bispectral measurement setup, we acquire reflectance and reradiation data of a variety of fluorescent materials, including vehicle paints, paper and fabric, and compare their renderings with RGB, RGBxRGB, and spectral BRDFs. Our acquisition is guided by a principal component analysis on complete bispectral data taken under a sparse set of angles. We show that in order to faithfully reproduce the full bispectral information for all other angles, only a very small number of wavelength pairs needs to be measured at a high angular resolution.},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {29},
  NUMBER    = {4},
  PAGES     = {1--7},
  EID       = {97},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010},
}
Endnote
%0 Journal Article %A Hullin, Matthias B. %A Hanika, Johannes %A Ajdin, Boris %A Lensch, Hendrik P. A. %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Acquisition and Analysis of Bispectral Bidirectional Reflectance and Reradiation Distribution Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1729-A %F EDOC: 537321 %R 10.1145/1833349.1778834 %7 2010 %D 2010 %X In fluorescent materials, light from a certain band of incident wavelengths is <br>reradiated at longer wavelengths, i.e., with a reduced per-photon energy. While <br>fluorescent materials are common in everyday life, they have received little <br>attention in computer graphics. Especially, no bidirectional reradiation <br>measurements of fluorescent materials have been available so far. In this <br>paper, we extend the well-known concept of the bidirectional reflectance <br>distribution function (BRDF) to account for energy transfer between <br>wavelengths, resulting in a Bispectral Bidirectional Reflectance and <br>Reradiation Distribution Function (bispectral BRRDF). Using a bidirectional and <br>bispectral measurement setup, we acquire reflectance and reradiation data of a <br>variety of fluorescent materials, including vehicle paints, paper and fabric, <br>and compare their renderings with RGB, RGBxRGB, and spectral BRDFs. Our <br>acquisition is guided by a principal component analysis on complete bispectral <br>data taken under a sparse set of angles. We show that in order to faithfully <br>reproduce the full bispectral information for all other angles, only a very <br>small number of wavelength pairs needs to be measured at a high angular <br>resolution. 
%J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 7 %Z sequence number: 97 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ false
Herzog, R., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2010. Spatio-Temporal Upsampling on the GPU. Proceedings I3D 2010, ACM.
Abstract
Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result.
Export
BibTeX
@inproceedings{HerzogI3D2010,
  TITLE     = {Spatio-Temporal Upsampling on the {GPU}},
  AUTHOR    = {Herzog, Robert and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-60558-939-8},
  DOI       = {10.1145/1730804.1730819},
  PUBLISHER = {ACM},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result.},
  BOOKTITLE = {Proceedings I3D 2010},
  EDITOR    = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.},
  PAGES     = {91--98},
  ADDRESS   = {Washington DC, USA},
}
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-Temporal Upsampling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-178C-C %F EDOC: 537285 %R 10.1145/1730804.1730819 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington DC, USA %X Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result. %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 91 - 98 %I ACM %@ 978-1-60558-939-8
Hasler, N., Thormählen, T., Rosenhahn, B., and Seidel, H.-P. 2010a. Learning Skeletons for Shape and Pose. Proceedings I3D 2010, ACM.
Export
BibTeX
@inproceedings{HasThoRosSei10Skeleton,
  TITLE     = {Learning Skeletons for Shape and Pose},
  AUTHOR    = {Hasler, Nils and Thorm{\"a}hlen, Thorsten and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-60558-939-8},
  DOI       = {10.1145/1730804.1730809},
  PUBLISHER = {ACM},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {Proceedings I3D 2010},
  EDITOR    = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.},
  PAGES     = {23--30},
  ADDRESS   = {Washington, DC, USA},
}
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Thorm&#228;hlen, Thorsten %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Skeletons for Shape and Pose : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1763-6 %F EDOC: 537281 %R 10.1145/1730804.1730809 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington, DC, USA %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 23 - 30 %I ACM %@ 978-1-60558-939-8
Hasler, N., Ackermann, H., Rosenhahn, B., Thormählen, T., and Seidel, H.-P. 2010b. Multilinear Pose and Body Shape Estimation of Dressed Subjects from Image Sets. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Export
BibTeX
@inproceedings{Hasler2010Multilinear,
  TITLE     = {Multilinear Pose and Body Shape Estimation of Dressed Subjects from Image Sets},
  AUTHOR    = {Hasler, Nils and Ackermann, Hanno and Rosenhahn, Bodo and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-6984-0},
  DOI       = {10.1109/CVPR.2010.5539853},
  PUBLISHER = {IEEE},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)},
  PAGES     = {1823--1830},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Ackermann, Hanno %A Rosenhahn, Bodo %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multilinear Pose and Body Shape Estimation of Dressed Subjects from Image Sets : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-176B-5 %F EDOC: 537300 %R 10.1109/CVPR.2010.5539853 %D 2010 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 1823 - 1830 %I IEEE %@ 978-1-424-46984-0
Granados, M., Ajdin, B., Wand, M., Theobalt, C., Seidel, H.-P., and Lensch, H.P.A. 2010. Optimal HDR Reconstruction with Linear Digital Cameras. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Abstract
Given a multi-exposure sequence of a scene, our aim is to recover the absolute irradiance falling onto a linear camera sensor. The established approach is to perform a weighted average of the scaled input exposures. However, there is no clear consensus on the appropriate weighting to use. We propose a weighting function that produces statistically optimal estimates under the assumption of compound- Gaussian noise. Our weighting is based on a calibrated camera model that accounts for all noise sources. This model also allows us to simultaneously estimate the irradiance and its uncertainty. We evaluate our method on simulated and real world photographs, and show that we consistently improve the signal-to-noise ratio over previous approaches. Finally, we show the effectiveness of our model for optimal exposure sequence selection and HDR image denoising.
Export
BibTeX
@inproceedings{Granados2010,
  TITLE     = {Optimal {HDR} Reconstruction with Linear Digital Cameras},
  AUTHOR    = {Granados, Miguel and Ajdin, Boris and Wand, Michael and Theobalt, Christian and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-6983-3},
  DOI       = {10.1109/CVPR.2010.5540208},
  PUBLISHER = {IEEE},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Given a multi-exposure sequence of a scene, our aim is to recover the absolute irradiance falling onto a linear camera sensor. The established approach is to perform a weighted average of the scaled input exposures. However, there is no clear consensus on the appropriate weighting to use. We propose a weighting function that produces statistically optimal estimates under the assumption of compound-Gaussian noise. Our weighting is based on a calibrated camera model that accounts for all noise sources. This model also allows us to simultaneously estimate the irradiance and its uncertainty. We evaluate our method on simulated and real world photographs, and show that we consistently improve the signal-to-noise ratio over previous approaches. Finally, we show the effectiveness of our model for optimal exposure sequence selection and HDR image denoising.},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)},
  PAGES     = {215--222},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Granados, Miguel %A Ajdin, Boris %A Wand, Michael %A Theobalt, Christian %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimal HDR Reconstruction with Linear Digital Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1771-6 %F EDOC: 537323 %R 10.1109/CVPR.2010.5540208 %D 2010 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %X Given a multi-exposure sequence of a scene, our aim is to recover the absolute irradiance falling onto a linear camera sensor. The established approach is to perform a weighted average of the scaled input exposures. However, there is no clear consensus on the appropriate weighting to use. We propose a weighting function that produces statistically optimal estimates under the assumption of compound-Gaussian noise. Our weighting is based on a calibrated camera model that accounts for all noise sources. This model also allows us to simultaneously estimate the irradiance and its uncertainty. We evaluate our method on simulated and real world photographs, and show that we consistently improve the signal-to-noise ratio over previous approaches. Finally, we show the effectiveness of our model for optimal exposure sequence selection and HDR image denoising. %B IEEE Conference on Computer Vision and Pattern Recognition %P 215 - 222 %I IEEE %@ 978-1-4244-6983-3
Gall, J., Rosenhahn, B., Brox, T., and Seidel, H.-P. 2010. Optimization and Filtering for Human Motion Capture : A Multi-Layer Framework. International Journal of Computer Vision87, 1-2.
Abstract
Local optimization and filtering have been widely applied to model-based 3D human motion capture. Global stochastic optimization has recently been proposed as promising alternative solution for tracking and initialization. In order to benefit from optimization and filtering, we introduce a multi-layer framework that combines stochastic optimization, filtering, and local optimization. While the first layer relies on interacting simulated annealing and some weak prior information on physical constraints, the second layer refines the estimates by filtering and local optimization such that the accuracy is increased and ambiguities are resolved over time without imposing restrictions on the dynamics. In our experimental evaluation, we demonstrate the significant improvements of the multi-layer framework and provide quantitative 3D pose tracking results for the complete \texttt{HumanEva-II} dataset. The paper further comprises a comparison of global stochastic optimization with particle filtering, annealed particle filtering, and local optimization.
Export
BibTeX
@article{Gall2008c,
  TITLE     = {Optimization and Filtering for Human Motion Capture: A Multi-Layer Framework},
  AUTHOR    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Brox, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0920-5691},
  DOI       = {10.1007/s11263-008-0173-1},
  PUBLISHER = {Kluwer Academic Publishers},
  ADDRESS   = {Hingham, Mass.},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Local optimization and filtering have been widely applied to model-based 3D human motion capture. Global stochastic optimization has recently been proposed as promising alternative solution for tracking and initialization. In order to benefit from optimization and filtering, we introduce a multi-layer framework that combines stochastic optimization, filtering, and local optimization. While the first layer relies on interacting simulated annealing and some weak prior information on physical constraints, the second layer refines the estimates by filtering and local optimization such that the accuracy is increased and ambiguities are resolved over time without imposing restrictions on the dynamics. In our experimental evaluation, we demonstrate the significant improvements of the multi-layer framework and provide quantitative 3D pose tracking results for the complete \texttt{HumanEva-II} dataset. The paper further comprises a comparison of global stochastic optimization with particle filtering, annealed particle filtering, and local optimization.},
  JOURNAL   = {International Journal of Computer Vision},
  VOLUME    = {87},
  NUMBER    = {1-2},
  PAGES     = {75--92},
}
Endnote
%0 Journal Article %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Brox, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Optimization and Filtering for Human Motion Capture : A Multi-Layer Framework : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1773-2 %F EDOC: 537278 %R 10.1007/s11263-008-0173-1 %7 2008-11-15 %D 2010 %* Review method: peer-reviewed %X Local optimization and filtering have been widely applied to model-based 3D<br>human motion capture. Global stochastic optimization has recently been<br>proposed as promising alternative solution for tracking and initialization. In<br>order to benefit from optimization and filtering, we introduce a multi-layer<br>framework that combines stochastic optimization, filtering, and local<br>optimization. While the first layer relies on interacting simulated<br>annealing and some weak prior information on physical constraints, the second<br>layer refines the estimates by filtering and local optimization such that the<br>accuracy is increased and ambiguities are resolved over time without imposing<br>restrictions on the dynamics. In our experimental evaluation, we demonstrate<br>the significant improvements of the multi-layer framework and provide<br>quantitative 3D pose tracking results for the complete \texttt{HumanEva-II} <br>dataset.<br>The paper further comprises a comparison of global stochastic optimization<br>with particle filtering, annealed particle filtering, and local optimization. %J International Journal of Computer Vision %O Int. J. Comput. Vis. %V 87 %N 1-2 %& 75 %P 75 - 92 %I Kluwer Academic Publishers %C Hingham, Mass. %@ false %U https://rdcu.be/dJxj8
Fuchs, M., Chen, T., Wang, O., Raskar, R., Seidel, H.-P., and Lensch, H.P.A. 2010. Real-Time Temporal Shaping of High-Speed Video Streams. Computers & Graphics (Proc. SBIM 2009)34, 5.
Abstract
Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We study the properties of both contemporary and idealized display devices and demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion.
Export
BibTeX
@article{Fuchs2009,
  TITLE     = {Real-Time Temporal Shaping of High-Speed Video Streams},
  AUTHOR    = {Fuchs, Martin and Chen, Tongbo and Wang, Oliver and Raskar, Ramesh and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2010.05.017},
  PUBLISHER = {Elsevier},
  ADDRESS   = {Amsterdam},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We study the properties of both contemporary and idealized display devices and demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion.},
  JOURNAL   = {Computers \& Graphics (Proc. SBIM)},
  VOLUME    = {34},
  NUMBER    = {5},
  PAGES     = {575--584},
  BOOKTITLE = {Extended papers from the 2009 Sketch-Based Interfaces and Modeling Conference (SBIM 2009)},
}
Endnote
%0 Journal Article %A Fuchs, Martin %A Chen, Tongbo %A Wang, Oliver %A Raskar, Ramesh %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Real-Time Temporal Shaping of High-Speed Video Streams : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1783-D %F EDOC: 537299 %R 10.1016/j.cag.2010.05.017 %7 2010 %D 2010 %* Review method: peer-reviewed %X Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We study the properties of both contemporary and idealized display devices and demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion. %J Computers & Graphics %V 34 %N 5 %& 575 %P 575 - 584 %I Elsevier %C Amsterdam %@ false %B Extended papers from the 2009 Sketch-Based Interfaces and Modeling Conference Vision, Modeling & Visualization %O SBIM 2009 Sixth Eurographics/ACM Symposium on Sketch-Based Interfaces and Modeling
Eisemann, M., Eisemann, E., Seidel, H.-P., and Magnor, M.A. 2010. Photo Zoom: High Resolution from Unordered Image Collections. SIGGRAPH ’10: Special Interest Group on Computer Graphics and Interactive Techniques Conference, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/EisemannESM10,
  TITLE     = {Photo Zoom: High Resolution from Unordered Image Collections},
  AUTHOR    = {Eisemann, Martin and Eisemann, Elmar and Seidel, Hans-Peter and Magnor, Marcus A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-0393-4},
  DOI       = {10.1145/1836845.1836986},
  PUBLISHER = {ACM},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {SIGGRAPH '10: Special Interest Group on Computer Graphics and Interactive Techniques Conference},
  EDITOR    = {Grimm, Cindy},
  PAGES     = {132:1--132:1},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Eisemann, Martin %A Eisemann, Elmar %A Seidel, Hans-Peter %A Magnor, Marcus A. %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Photo Zoom: High Resolution from Unordered Image Collections : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B00-B %R 10.1145/1836845.1836986 %D 2010 %B International Conference on Computer Graphics and Interactive Techniques 2010 %Z date of event: 2010-07-26 - 2010-07-30 %C Los Angeles, CA, USA %B SIGGRAPH '10: Special Interest Group on Computer Graphics and Interactive Techniques Conference %E Grimm, Cindy %P 132:1 - 132:1 %I ACM %@ 978-1-4503-0393-4
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2010a. Adaptive Image-space Stereo View Synthesis. Vision, Modeling & Visualization (VMV 2010), Eurographics Association.
Export
BibTeX
@inproceedings{Didyk2010b,
  TITLE     = {Adaptive Image-space Stereo View Synthesis},
  AUTHOR    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-79-1},
  DOI       = {10.2312/PE/VMV/VMV10/299-306},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {Vision, Modeling \& Visualization (VMV 2010)},
  EDITOR    = {Koch, Reinhard and Kolb, Andreas and Rezk-Salama, Christof},
  PAGES     = {299--306},
  ADDRESS   = {Siegen, Germany},
}
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive Image-space Stereo View Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-172C-4 %F EDOC: 537308 %R 10.2312/PE/VMV/VMV10/299-306 %D 2010 %B 15th International Workshop on Vision, Modeling, and Visualization %Z date of event: 2010-11-15 - 2010-11-17 %C Siegen, Germany %B Vision, Modeling & Visualization %E Koch, Reinhard; Kolb, Andreas; Rezk-Salama, Christof %P 299 - 306 %I Eurographics Association %@ 978-3-905673-79-1
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2010b. Apparent Display Resolution Enhancement for Moving Images. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010)29, 4.
Export
BibTeX
@article{Didyk2010a,
  TITLE     = {Apparent Display Resolution Enhancement for Moving Images},
  AUTHOR    = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-0210-4},
  DOI       = {10.1145/1778765.1778850},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {29},
  NUMBER    = {4},
  PAGES     = {1--8},
  EID       = {113},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010},
  EDITOR    = {Hoppe, Hugues},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Display Resolution Enhancement for Moving Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1734-0 %F EDOC: 537269 %R 10.1145/1778765.1778850 %7 2010 %D 2010 %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 8 %Z sequence number: 113 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ 978-1-4503-0210-4
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2010c. Perceptually-motivated Real-time Temporal Upsampling of 3D Content for High-refresh-rate Displays. Computer Graphics Forum (Proc. EUROGRAPHICS 2010)29, 2.
Abstract
High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays.
Export
BibTeX
@article{Didyk2010,
  TITLE     = {Perceptually-motivated Real-time Temporal Upsampling of {3D} Content for High-refresh-rate Displays},
  AUTHOR    = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2009.01641.x},
  PUBLISHER = {Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays.},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {29},
  NUMBER    = {2},
  PAGES     = {713--722},
  BOOKTITLE = {EUROGRAPHICS 2010},
  EDITOR    = {Akenine-M{\"o}ller, Tomas and Zwicker, Matthias},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Real-time Temporal Upsampling of 3D Content for High-refresh-rate Displays : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1778-7 %F EDOC: 537284 %R 10.1111/j.1467-8659.2009.01641.x %7 2010 %D 2010 %X High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays. 
%J Computer Graphics Forum %V 29 %N 2 %& 713 %P 713 - 722 %I Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2010 %O EUROGRAPHICS 2010 The European Association for Computer Graphics 31st Annual Conference ; Norrk&#246;ping, Sweden, May3rd - 7th, 2010 EG 2010
Bokeloh, M., Wand, M., and Seidel, H.-P. 2010. A Connection between Partial Symmetry and Inverse Procedural Modeling. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010)29, 4.
Export
BibTeX
@article{Bokeloh2010,
  TITLE     = {A Connection between Partial Symmetry and Inverse Procedural Modeling},
  AUTHOR    = {Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-0210-4},
  DOI       = {10.1145/1778765.1778841},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {29},
  NUMBER    = {4},
  PAGES     = {1--10},
  EID       = {104},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010},
  EDITOR    = {Hoppe, Hugues},
}
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Connection between Partial Symmetry and Inverse Procedural Modeling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1724-3 %F EDOC: 537324 %R 10.1145/1778765.1778841 %7 2010 %D 2010 %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 10 %Z sequence number: 104 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ 978-1-4503-0210-4
Baak, A., Helten, T., Müller, M., Pons-Moll, G., Rosenhahn, B., and Seidel, H.-P. 2010. Analyzing and Evaluating Markerless Motion Tracking Using Inertial Sensors. Trends and Topics in Computer Vision (ECCV 2010), Springer.
Export
BibTeX
@inproceedings{DBLP:conf/eccv/BaakHMPRS10,
  TITLE     = {Analyzing and Evaluating Markerless Motion Tracking Using Inertial Sensors},
  AUTHOR    = {Baak, Andreas and Helten, Thomas and M{\"u}ller, Meinard and Pons-Moll, Gerard and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-35748-0},
  DOI       = {10.1007/978-3-642-35749-7_11},
  PUBLISHER = {Springer},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {Trends and Topics in Computer Vision (ECCV 2010)},
  EDITOR    = {Kutulakos, Kiriakos N.},
  PAGES     = {139--152},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {6553},
  ADDRESS   = {Heraklion, Crete, Greece},
}
Endnote
%0 Conference Proceedings %A Baak, Andreas %A Helten, Thomas %A M&#252;ller, Meinard %A Pons-Moll, Gerard %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Analyzing and Evaluating Markerless Motion Tracking Using Inertial Sensors : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5B0E-D %R 10.1007/978-3-642-35749-7_11 %D 2010 %B ECCV 2010 Workshops %Z date of event: 2010-09-10 - 2010-09-11 %C Heraklion, Crete, Greece %B Trends and Topics in Computer Vision %E Kutulakos, Kiriakos N. %V 6553 %P 139 - 152 %I Springer %@ 978-3-642-35748-0 %B Lecture Notes in Computer Science %N 6553 %U https://rdcu.be/dJwWj
Aydin, T.O., Čadík, M., Myszkowski, K., and Seidel, H.-P. 2010a. Video Quality Assessment for Computer Graphics Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2010)29, 6.
Export
BibTeX
@article{TuncSGAsia2010,
  TITLE     = {Video Quality Assessment for Computer Graphics Applications},
  AUTHOR    = {Aydin, Tunc Ozan and {\v C}ad{\'i}k, Martin and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-0439-9},
  DOI       = {10.1145/1866158.1866187},
  LOCALID   = {Local-ID: C125675300671F7B-0ED72325CD8F187FC12577CF005BA5C5-TuncSGAsia2010},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {29},
  NUMBER    = {6},
  PAGES     = {1--12},
  EID       = {161},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2010},
  EDITOR    = {Drettakis, George},
}
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A &#268;ad&#237;k, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Quality Assessment for Computer Graphics Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1797-0 %F EDOC: 537307 %R 10.1145/1866158.1866187 %F OTHER: Local-ID: C125675300671F7B-0ED72325CD8F187FC12577CF005BA5C5-TuncSGAsia2010 %D 2010 %J ACM Transactions on Graphics %V 29 %N 6 %& 1 %P 1 - 12 %Z sequence number: 161 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2010 %O ACM SIGGRAPH Asia 2010 Seoul, South Korea %@ 978-1-4503-0439-9
Aydin, T.O., Čadík, M., Myszkowski, K., and Seidel, H.-P. 2010b. Visually Significant Edges. ACM Transactions on Applied Perception7, 4.
Abstract
Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework,which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden.
Export
BibTeX
@article{TuncTAP2010,
  TITLE     = {Visually Significant Edges},
  AUTHOR    = {Aydin, Tunc Ozan and {\v C}ad{\'i}k, Martin and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1544-3558},
  DOI       = {10.1145/1823738.1823745},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  ABSTRACT  = {Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework, which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden.},
  JOURNAL   = {ACM Transactions on Applied Perception},
  VOLUME    = {7},
  NUMBER    = {4},
  PAGES     = {1--14},
  EID       = {27},
}
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A &#268;ad&#237;k, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visually Significant Edges : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-179A-A %F EDOC: 537306 %R 10.1145/1823738.1823745 %7 2010 %D 2010 %* Review method: peer-reviewed %X Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework,which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden. %J ACM Transactions on Applied Perception %V 7 %N 4 %& 1 %P 1 - 14 %Z sequence number: 27 %I ACM %C New York, NY %@ false
Adams, B., Wicke, M., Ovsjanikov, M., Wand, M., Seidel, H.-P., and Guibas, L. 2010. Meshless Shape and Motion Design for Multiple Deformable Objects. Computer Graphics Forum29, 1.
Export
BibTeX
@article{Adams2010z,
  TITLE     = {Meshless Shape and Motion Design for Multiple Deformable Objects},
  AUTHOR    = {Adams, Bart and Wicke, Martin and Ovsjanikov, Maks and Wand, Michael and Seidel, Hans-Peter and Guibas, Leonidas},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2009.01536.x},
  PUBLISHER = {Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {29},
  NUMBER    = {1},
  PAGES     = {43--59},
}
Endnote
%0 Journal Article %A Adams, Bart %A Wicke, Martin %A Ovsjanikov, Maks %A Wand, Michael %A Seidel, Hans-Peter %A Guibas, Leonidas %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Meshless Shape and Motion Design for Multiple Deformable Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1765-2 %F EDOC: 537277 %R 10.1111/j.1467-8659.2009.01536.x %7 2010 %D 2010 %* Review method: peer-reviewed %J Computer Graphics Forum %V 29 %N 1 %& 43 %P 43 - 59 %I Blackwell %C Oxford %@ false
2009
Zimmer, H.L., Bruhn, A., Weickert, J., et al. 2009a. Complementary Optic Flow. Energy Minimization Methods in Computer Vision and Pattern Recognition (EMMCVPR 2009), Springer.
Export
BibTeX
@inproceedings{Zimmer2009,
  TITLE     = {Complementary Optic Flow},
  AUTHOR    = {Zimmer, Henning Lars and Bruhn, Andr{\'e}s and Weickert, Joachim and Valgaerts, Levi and Salgado, Agust{\'\i}n and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-642-03640-6},
  DOI       = {10.1007/978-3-642-03641-5_16},
  PUBLISHER = {Springer},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {Energy Minimization Methods in Computer Vision and Pattern Recognition (EMMCVPR 2009)},
  EDITOR    = {Cremers, Daniel and Boykov, Yuri and Blake, Andrew and Schmidt, Frank R.},
  PAGES     = {207--220},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {5681},
  ADDRESS   = {Bonn, Germany},
}
Endnote
%0 Conference Proceedings %A Zimmer, Henning Lars %A Bruhn, Andr&#233;s %A Weickert, Joachim %A Valgaerts, Levi %A Salgado, Agust&#769;in %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Complementary Optic Flow : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5027-C %R 10.1007/978-3-642-03641-5_16 %D 2009 %B 7th International Conference on Energy Minimization Methods in Computer Vision and Pattern Recognition %Z date of event: 2009-08-24 - 2009-08-27 %C Bonn, Germany %B Energy Minimization Methods in Computer Vision and Pattern Recognition %E Cremers, Daniel; Boykov, Yuri; Blake, Andrew; Schmidt, Frank R. %P 207 - 220 %I Springer %@ 3-642-03640-6 %B Lecture Notes in Computer Science %N 5681 %U https://rdcu.be/dJkzm
Zimmer, H.L., Breuß, M., Weickert, J., and Seidel, H.-P. 2009b. Hyperbolic Numerics for Variational Approaches to Correspondence Problems. Scale Space and Variational Methods in Computer Vision (SSVM 2009), Springer.
Export
BibTeX
@inproceedings{DBLP:conf/scalespace/ZimmerBWS09,
  TITLE     = {Hyperbolic Numerics for Variational Approaches to Correspondence Problems},
  AUTHOR    = {Zimmer, Henning Lars and Breu{\ss}, Michael and Weickert, Joachim and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-02255-5},
  DOI       = {10.1007/978-3-642-02256-2_53},
  PUBLISHER = {Springer},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {Scale Space and Variational Methods in Computer Vision (SSVM 2009)},
  EDITOR    = {Tai, Xue-Cheng and M{\o}rken, Knut and Lysaker, Marius and Lie, Knut-Andreas},
  PAGES     = {636--647},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {5567},
  ADDRESS   = {Voss, Norway},
}
Endnote
%0 Conference Proceedings %A Zimmer, Henning Lars %A Breu&#223;, Michael %A Weickert, Joachim %A Seidel, Hans-Peter %+ International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Hyperbolic Numerics for Variational Approaches to Correspondence Problems : %G eng %U http://hdl.handle.net/21.11116/0000-000F-585D-7 %R 10.1007/978-3-642-02256-2_53 %D 2009 %B Second International Conference on Scale Space and Variational Methods in Computer Vision %Z date of event: 2009-06-01 - 2009-06-05 %C Voss, Norway %B Scale Space and Variational Methods in Computer Vision %E Tai, Xue-Cheng; M&#248;rken, Knut; Lysaker, Marius; Lie, Knut-Andreas %P 636 - 647 %I Springer %@ 978-3-642-02255-5 %B Lecture Notes in Computer Science %N 5567 %U https://rdcu.be/dJepm
Wang, Y.-S., Fu, H., Sorkine, O., Lee, T.-Y., and Seidel, H.-P. 2009. Motion-aware Temporal Coherence for Video Resizing. ACM Transactions on Graphics, ACM.
Export
BibTeX
@inproceedings{DBLP:journals/tog/WangFSLS09,
  TITLE     = {Motion-aware Temporal Coherence for Video Resizing},
  AUTHOR    = {Wang, Yu-Shuen and Fu, Hongbo and Sorkine, Olga and Lee, Tong-Yee and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-60558-858-2},
  DOI       = {10.1145/1618452.1618473},
  PUBLISHER = {ACM},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers},
  PAGES     = {127:1--127:10},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {28},
  ISSUE     = {5},
  ADDRESS   = {Yokohama, Japan},
}
Endnote
%0 Conference Proceedings %A Wang, Yu{-}Shuen %A Fu, Hongbo %A Sorkine, Olga %A Lee, Tong{-}Yee %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Motion-aware Temporal Coherence for Video Resizing : %G eng %U http://hdl.handle.net/21.11116/0000-000F-59BD-9 %R 10.1145/1618452.1618473 %D 2009 %B ACM SIGGRAPH Asia 2009 %Z date of event: 2009-12-16 - 2009-12-19 %C Yokohama, Japan %B SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers %P 127:1 - 127:10 %I ACM %@ 978-1-60558-858-2 %J ACM Transactions on Graphics %V 28 %N 5 %I Association for Computing Machinery %@ false
Wand, M., Adams, B., Ovsjanikov, M., et al. 2009. Efficient Reconstruction of Nonrigid Shape and Motion from Real-Time 3D Scanner Data. ACM Transactions on Graphics28, 2.
Export
BibTeX
@article{Wand-et-al_TG09,
  TITLE     = {Efficient Reconstruction of Nonrigid Shape and Motion from Real-Time {3D} Scanner Data},
  AUTHOR    = {Wand, Michael and Adams, Bart and Ovsjanikov, Maksim and Berner, Alexander and Bokeloh, Martin and Jenke, Philipp and Guibas, Leonidas and Seidel, Hans-Peter and Schilling, Andreas},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1516522.1516526},
  LOCALID   = {Local-ID: C125675300671F7B-9866D4932E2574F3C125758200546278-Wand2009},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2009},
  DATE      = {2009},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {28},
  NUMBER    = {2},
  PAGES     = {15:1--15:15},
}
Endnote
%0 Journal Article %A Wand, Michael %A Adams, Bart %A Ovsjanikov, Maksim %A Berner, Alexander %A Bokeloh, Martin %A Jenke, Philipp %A Guibas, Leonidas %A Seidel, Hans-Peter %A Schilling, Andreas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Efficient Reconstruction of Nonrigid Shape and Motion from Real-Time 3D Scanner Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19A5-F %F EDOC: 520492 %F OTHER: Local-ID: C125675300671F7B-9866D4932E2574F3C125758200546278-Wand2009 %R 10.1145/1516522.1516526 %D 2009 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 28 %N 2 %& 15:1 %P 15:1 - 15:15 %I Association for Computing Machinery %C New York, NY %@ false
Tevs, A., Bokeloh, M., Wand, M., Schilling, A., and Seidel, H.-P. 2009. Isometric Registration of Ambiguous and Partial Data. 2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009), IEEE Computer Society.
Export
BibTeX
@inproceedings{Tevs-et-al_CVPR09,
  TITLE     = {Isometric Registration of Ambiguous and Partial Data},
  AUTHOR    = {Tevs, Art and Bokeloh, Martin and Wand, Michael and Schilling, Andreas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-3992-8},
  DOI       = {10.1109/CVPR.2009.5206775},
  LOCALID   = {Local-ID: C125675300671F7B-FDE7C47CB92076A5C1257576006692B8-TevsCVPR2009},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009)},
  PAGES     = {1185--1192},
  ADDRESS   = {Miami Beach, FL, USA},
}
Endnote
%0 Conference Proceedings %A Tevs, Art %A Bokeloh, Martin %A Wand, Michael %A Schilling, Andres %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Isometric Registration of Ambiguous and Partial Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19B7-7 %F EDOC: 520491 %F OTHER: Local-ID: C125675300671F7B-FDE7C47CB92076A5C1257576006692B8-TevsCVPR2009 %R 10.1109/CVPR.2009.5206775 %D 2009 %B 2009 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2009-06-20 - 2009-06-25 %C Miami Beach, FL, USA %B 2009 IEEE Conference on Computer Vision and Pattern Recognition %P 1185 - 1192 %I IEEE Computer Society %@ 978-1-4244-3992-8
Shi, K., Theisel, H., Hauser, H., et al. 2009. Path Line Attributes - An Information Visualization Approach to Analyzing the Dynamic Behavior of 3D Time-dependent Flow fields. In: Topology-Based Methods in Visualization II. Springer, Springer Verlag, Germany.
Abstract
We describe an approach to visually analyzing the dynamic behavior of 3D<br>time-dependent flow fields by considering the behavior of the path lines. At<br>selected positions in the 4D space-time domain, we compute a number of local<br>and global properties of path lines describing relevant features of them. The<br>resulting multivariate data set is analyzed by applying state-of-the-art <br>information<br>visualization approaches in the sense of a set of linked views (scatter<br>plots, parallel coordinates, etc.) with interactive brushing and focus+context<br>visualization. The selected path lines with certain properties are integrated<br>and visualized as colored 3D curves. This approach allows an interactive <br>exploration<br>of intricate 4D flow structures. We apply our method to a number<br>of flow data sets and describe how path line attributes are used for describing<br>characteristic features of these flows.
Export
BibTeX
@incollection{Shi-et-al_TBMV2.09,
  TITLE     = {Path Line Attributes -- An Information Visualization Approach to Analyzing the Dynamic Behavior of {3D} Time-dependent Flow fields},
  AUTHOR    = {Shi, Kuangyu and Theisel, Holger and Hauser, Helwig and Weinkauf, Tino and Matkovic, Kresimir and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1612-3786},
  ISBN      = {978-3-540-88605-1},
  URL       = {http://www.techfak.uni-bielefeld.de/ags/cg/publications/TopoInVis07.pdf},
  DOI       = {10.1007/978-3-540-88606-8_6},
  LOCALID   = {Local-ID: C125675300671F7B-924EA41935F1849CC125739A004CC077-Shi2007b},
  PUBLISHER = {Springer},
  ADDRESS   = {Springer Verlag, Germany},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {We describe an approach to visually analyzing the dynamic behavior of 3D time-dependent flow fields by considering the behavior of the path lines. At selected positions in the 4D space-time domain, we compute a number of local and global properties of path lines describing relevant features of them. The resulting multivariate data set is analyzed by applying state-of-the-art information visualization approaches in the sense of a set of linked views (scatter plots, parallel coordinates, etc.) with interactive brushing and focus+context visualization. The selected path lines with certain properties are integrated and visualized as colored 3D curves. This approach allows an interactive exploration of intricate 4D flow structures. We apply our method to a number of flow data sets and describe how path line attributes are used for describing characteristic features of these flows.},
  BOOKTITLE = {Topology-Based Methods in Visualization II},
  EDITOR    = {Hege, Hans-Christian and Polthier, Konrad and Scheuermann, Gerik},
  PAGES     = {75--88},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Shi, Kuangyu %A Theisel, Holger %A Hauser, Helwig %A Weinkauf, Tino %A Matkovic, Kresimir %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Path Line Attributes - An Information Visualization Approach to Analyzing the Dynamic Behavior of 3D Time-dependent Flow fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19C6-5 %F EDOC: 520502 %U http://www.techfak.uni-bielefeld.de/ags/cg/publications/TopoInVis07.pdf %F OTHER: Local-ID: C125675300671F7B-924EA41935F1849CC125739A004CC077-Shi2007b %R 10.1007/978-3-540-88606-8_6 %D 2009 %X We describe an approach to visually analyzing the dynamic behavior of 3D<br>time-dependent flow fields by considering the behavior of the path lines. At<br>selected positions in the 4D space-time domain, we compute a number of local<br>and global properties of path lines describing relevant features of them. The<br>resulting multivariate data set is analyzed by applying state-of-the-art <br>information<br>visualization approaches in the sense of a set of linked views (scatter<br>plots, parallel coordinates, etc.) with interactive brushing and focus+context<br>visualization. The selected path lines with certain properties are integrated<br>and visualized as colored 3D curves. This approach allows an interactive <br>exploration<br>of intricate 4D flow structures. We apply our method to a number<br>of flow data sets and describe how path line attributes are used for describing<br>characteristic features of these flows. 
%B Topology-Based Methods in Visualization II %E Hege, Hans-Christian; Polthier, Konrad; Scheuermann, Gerik %P 75 - 88 %I Springer %C Springer Verlag, Germany %@ 978-3-540-88605-1 %S Mathematics and Visualization %@ false %U https://rdcu.be/dJd6h
Shaheen, M., Gall, J., Strzodka, R., Gool, L.V., and Seidel, H.-P. 2009. A Comparison of 3D Model-based Tracking Approaches for Human Motion Capture in Uncontrolled Environments. 2009 Workshop on Applications of Computer Vision (WACV 2009), IEEE.
Export
BibTeX
@inproceedings{DBLP:conf/wacv/ShaheenGSGS09,
  TITLE     = {A Comparison of {3D} Model-based Tracking Approaches for Human Motion Capture in Uncontrolled Environments},
  AUTHOR    = {Shaheen, Mohammed and Gall, J{\"u}rgen and Strzodka, Robert and Van Gool, Luc and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-5497-6},
  DOI       = {10.1109/WACV.2009.5403039},
  PUBLISHER = {IEEE},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {2009 Workshop on Applications of Computer Vision (WACV 2009)},
  PAGES     = {1--8},
  ADDRESS   = {Snowbird, UT, USA},
}
Endnote
%0 Conference Proceedings %A Shaheen, Mohammed %A Gall, J&#252;rgen %A Strzodka, Robert %A Gool, Luc Van %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Comparison of 3D Model-based Tracking Approaches for Human Motion Capture in Uncontrolled Environments : %G eng %U http://hdl.handle.net/21.11116/0000-000F-582B-F %R 10.1109/WACV.2009.5403039 %D 2009 %B 2009 Workshop on Applications of Computer Vision %Z date of event: 2009-12-07 - 2009-12-08 %C Snowbird, UT, USA %B 2009 Workshop on Applications of Computer Vision %P 1 - 8 %I IEEE %@ 978-1-4244-5497-6
Seidel, H.-P. 2009. The Cluster of Excellence on Multimodal Computing and Interaction – Robust, Efficient and Intelligent Processing of Text, Speech, Visual Data, and High Dimensional Representations. INFORMATIK 2009, Im Fokus des Lebends, Gesellschaft für Informatik.
Export
BibTeX
@inproceedings{DBLP:conf/gi/Seidel09,
  TITLE     = {The Cluster of Excellence on Multimodal Computing and Interaction -- Robust, Efficient and Intelligent Processing of Text, Speech, Visual Data, and High Dimensional Representations},
  AUTHOR    = {Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-88579-248-2},
  PUBLISHER = {Gesellschaft f{\"u}r Informatik},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {INFORMATIK 2009, Im Fokus des Lebends},
  EDITOR    = {Fischer, Stefan and Maehle, Erik and Reischuk, R{\"u}diger},
  VOLUME    = {P-154},
  PAGES     = {5--14},
  SERIES    = {Lecture Notes in Informatics},
  ADDRESS   = {L{\"u}beck, Germany},
}
Endnote
%0 Conference Proceedings %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society %T The Cluster of Excellence on Multimodal Computing and Interaction &#8211; Robust, Efficient and Intelligent Processing of Text, Speech, Visual Data, and High Dimensional Representations : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5948-D %D 2009 %B 39. Jahrestagung der Gesellschaft fur Informatik %Z date of event: 2009-09-28 - 2009-10-02 %C L&#252;beck, Germany %B INFORMATIK 2009, Im Fokus des Lebends %E Fischer, Stefan; Maehle, Erik; Reischuk, R&#252;diger %V P-154 %P 5 - 14 %I Gesellschaft f&#252;r Informatik %@ 978-3-88579-248-2 %B Lecture Notes in Informatics %I Gesellschaft f&#252;r Informatik
Seidel, H.-P., Haber, T., Fuchs, C., Bekaert, P., Goesele, M., and Lensch, H.P.A. 2009. Relighting Objects from Image Collections. 2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009), IEEE Computer Society.
Export
BibTeX
@inproceedings{Haber-et-al_CVPR09,
  TITLE     = {Relighting Objects from Image Collections},
  AUTHOR    = {Seidel, Hans-Peter and Haber, Tom and Fuchs, Christian and Bekaert, Phillippe and Goesele, Michael and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-3992-8},
  DOI       = {10.1109/CVPR.2009.5206753},
  LOCALID   = {Local-ID: C125675300671F7B-8B9867B8A53B7D7BC1257586002F1505-Haber2009},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2009},
  DATE      = {2009},
  BOOKTITLE = {2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009)},
  PAGES     = {627--634},
  ADDRESS   = {Miami, FL, USA},
}
Endnote
%0 Conference Proceedings %A Seidel, Hans-Peter %A Haber, Tom %A Fuchs, Christian %A Bekaert, Phillippe %A Goesele, Michael %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations %T Relighting Objects from Image Collections : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19D5-5 %F EDOC: 520483 %F OTHER: Local-ID: C125675300671F7B-8B9867B8A53B7D7BC1257586002F1505-Haber2009 %R 10.1109/CVPR.2009.5206753 %D 2009 %B 2009 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2009-06-20 - 2009-06-25 %C Miami, FL, USA %B 2009 IEEE Conference on Computer Vision and Pattern Recognition %P 627 - 634 %I IEEE Computer Society %@ 978-1-4244-3992-8
Schultz, T., Weickert, J., and Seidel, H.-P. 2009a. A Higher-Order Structure Tensor. In: Visualization and Processing of Tensor Fields - Advances and Perspectives. Springer, Berlin.
Abstract
Structure tensors are a common tool for orientation estimation in image processing and computer vision. We present a generalization of the traditional second-order model to a higher-order structure tensor (HOST), which is able to model more than one significant orientation, as found in corners, junctions, and multi-channel images. We provide a theoretical analysis and a number of mathematical tools that facilitate practical use of the HOST, visualize it using a novel glyph for higher-order tensors, and demonstrate how it can be applied in an improved integrated edge, corner, and junction detector.
Export
BibTeX
@incollection{Schultz2009Dag,
  TITLE     = {A Higher-Order Structure Tensor},
  AUTHOR    = {Schultz, Thomas and Weickert, Joachim and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-0227541065079CB6C125759000538A67-Schultz2009Dag},
  PUBLISHER = {Springer},
  ADDRESS   = {Berlin},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {Structure tensors are a common tool for orientation estimation in image processing and computer vision. We present a generalization of the traditional second-order model to a higher-order structure tensor (HOST), which is able to model more than one significant orientation, as found in corners, junctions, and multi-channel images. We provide a theoretical analysis and a number of mathematical tools that facilitate practical use of the HOST, visualize it using a novel glyph for higher-order tensors, and demonstrate how it can be applied in an improved integrated edge, corner, and junction detector.},
  BOOKTITLE = {Visualization and Processing of Tensor Fields -- Advances and Perspectives},
  EDITOR    = {Laidlaw, David H. and Weickert, Joachim},
  PAGES     = {263--280},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Schultz, Thomas %A Weickert, Joachim %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Higher-Order Structure Tensor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-196B-4 %F EDOC: 520475 %F OTHER: Local-ID: C125675300671F7B-0227541065079CB6C125759000538A67-Schultz2009Dag %I Springer %C Berlin %D 2009 %X Structure tensors are a common tool for orientation estimation in image processing and computer vision. We present a generalization of the traditional second-order model to a higher-order structure tensor (HOST), which is able to model more than one significant orientation, as found in corners, junctions, and multi-channel images. We provide a theoretical analysis and a number of mathematical tools that facilitate practical use of the HOST, visualize it using a novel glyph for higher-order tensors, and demonstrate how it can be applied in an improved integrated edge, corner, and junction detector. %B Visualization and Processing of Tensor Fields - Advances and Perspectives %E Laidlaw, David H.; Weickert, Joachim %P 263 - 280 %I Springer %C Berlin %S Mathematics and Visualization
Schultz, T., Weickert, J., and Seidel, H.-P. 2009b. A Higher-order Structure Tensor. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Structure tensors are a common tool for orientation estimation in image processing and computer vision. We present a generalization of the traditional second-order model to a higher-order structure tensor (HOST), which is able to model more than one significant orientation, as found in corners, junctions, and multi-channel images. We provide a theoretical analysis and a number of mathematical tools that facilitate practical use of the HOST, visualize it using a novel glyph for higher-order tensors, and demonstrate how it can be applied in an improved integrated edge, corner, and junction
Export
BibTeX
@techreport{SchultzlWeickertSeidel2007,
  TITLE       = {A Higher-order Structure Tensor},
  AUTHOR      = {Schultz, Thomas and Weickert, Joachim and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2007-4-005},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2009},
  DATE        = {2009},
  ABSTRACT    = {Structure tensors are a common tool for orientation estimation in image processing and computer vision. We present a generalization of the traditional second-order model to a higher-order structure tensor (HOST), which is able to model more than one significant orientation, as found in corners, junctions, and multi-channel images. We provide a theoretical analysis and a number of mathematical tools that facilitate practical use of the HOST, visualize it using a novel glyph for higher-order tensors, and demonstrate how it can be applied in an improved integrated edge, corner, and junction},
  TYPE        = {Research Report},
}
Endnote
%0 Report %A Schultz, Thomas %A Weickert, Joachim %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Higher-order Structure Tensor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-13BC-7 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2009 %X Structure tensors are a common tool for orientation estimation in image processing and computer vision. We present a generalization of the traditional second-order model to a higher-order structure tensor (HOST), which is able to model more than one significant orientation, as found in corners, junctions, and multi-channel images. We provide a theoretical analysis and a number of mathematical tools that facilitate practical use of the HOST, visualize it using a novel glyph for higher-order tensors, and demonstrate how it can be applied in an improved integrated edge, corner, and junction %B Research Report
Scholz, V., El-Abed, S., Seidel, H.-P., and Magnor, M.A. 2009. Editing Object Behaviour in Video Sequences. Computer Graphics Forum28, 6.
Export
BibTeX
@article{DBLP:journals/cgf/ScholzESM09,
  TITLE     = {Editing Object Behaviour in Video Sequences},
  AUTHOR    = {Scholz, Volker and El-Abed, Sascha and Seidel, Hans-Peter and Magnor, Marcus A.},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2009.01413.x},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2009},
  DATE      = {2009},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {28},
  NUMBER    = {6},
  PAGES     = {1632--1643},
}
Endnote
%0 Journal Article %A Scholz, Volker %A El-Abed, Sascha %A Seidel, Hans-Peter %A Magnor, Marcus A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society %T Editing Object Behaviour in Video Sequences : %G eng %U http://hdl.handle.net/21.11116/0000-000F-59CC-8 %R 10.1111/J.1467-8659.2009.01413.X %D 2009 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 28 %N 6 %& 1632 %P 1632 - 1643 %I Blackwell-Wiley %C Oxford %@ false
Ritschel, T., Okabe, M., Thormählen, T., and Seidel, H.-P. 2009a. Interactive Reflection Editing. ACM Transactions on Graphics, ACM.
Export
BibTeX
@inproceedings{Ritschel-et-al_SIGGRAPH09.2, TITLE = {Interactive Reflection Editing}, AUTHOR = {Ritschel, Tobias and Okabe, Makoto and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-60558-858-2}, DOI = {10.1145/1661412.1618475}, LOCALID = {Local-ID: C125675300671F7B-39F8B501C51D215AC1257664004B48A5-RitschelSigAsia2009}, PUBLISHER = {ACM}, PUBLISHER = {Association for Computing Machinery}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers}, PAGES = {129:1--129:7}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {28}, ISSUE = {5}, ADDRESS = {Yokohama, Japan}, }
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Okabe, Makoto %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Reflection Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19B5-B %F EDOC: 520474 %F OTHER: Local-ID: C125675300671F7B-39F8B501C51D215AC1257664004B48A5-RitschelSigAsia2009 %R 10.1145/1661412.1618475 %D 2009 %Z Review method: peer-reviewed %B ACM SIGGRAPH Asia 2009 %Z date of event: 2009-12-16 - 2009-12-19 %C Yokohama, Japan %B SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers %P 129:1 - 129:7 %I ACM %@ 978-1-60558-858-2 %J ACM Transactions on Graphics %V 28 %N 5 %I Association for Computing Machinery %@ false
Ritschel, T., Engelhardt, T., Grosch, T., Seidel, H.-P., Kautz, J., and Dachsbacher, C. 2009b. Micro-rendering for Scalable, Parallel Final Gathering. ACM Transactions on Graphics, ACM.
Abstract
Recent approaches to global illumination for dynamic scenes achieve interactive <br>frame rates by using coarse approximations to geometry, lighting, or both, <br>which limits scene complexity and rendering quality. High-quality global <br>illumination renderings of complex scenes are still limited to methods based on <br>ray tracing. While conceptually simple, these techniques are computationally <br>expensive. We present an efficient and scalable method to compute global <br>illumination solutions at interactive rates for complex and dynamic scenes. Our <br>method is based on parallel final gathering running entirely on the GPU. At <br>each final gathering location we perform micro-rendering: we traverse and <br>rasterize a hierarchical point-based scene representation into an <br>importance-warped micro-buffer, which allows for BRDF importance sampling. The <br>final reflected radiance is computed at each gathering location using the <br>micro-buffers and is then stored in image-space. We can trade quality for speed <br>by reducing the sampling rate of the gathering locations in conjunction with <br>bilateral upsampling. We demonstrate the applicability of our method to <br>interactive global illumination, the simulation of multiple indirect bounces, <br>and to final gathering from photon maps.
Export
BibTeX
@inproceedings{Ritschel-et-al_SIGGRAPH09,
  TITLE     = {Micro-rendering for Scalable, Parallel Final Gathering},
  AUTHOR    = {Ritschel, Tobias and Engelhardt, Thomas and Grosch, Thorsten and Seidel, Hans-Peter and Kautz, Jan and Dachsbacher, Carsten},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-60558-858-2},
  DOI       = {10.1145/1661412.1618478},
  LOCALID   = {Local-ID: C125675300671F7B-988EEF659FD4FDBCC1257637006845B4-RitschelEGSKD2009:MicroRendering},
  PUBLISHER = {ACM},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {Recent approaches to global illumination for dynamic scenes achieve interactive frame rates by using coarse approximations to geometry, lighting, or both, which limits scene complexity and rendering quality. High-quality global illumination renderings of complex scenes are still limited to methods based on ray tracing. While conceptually simple, these techniques are computationally expensive. We present an efficient and scalable method to compute global illumination solutions at interactive rates for complex and dynamic scenes. Our method is based on parallel final gathering running entirely on the GPU. At each final gathering location we perform micro-rendering: we traverse and rasterize a hierarchical point-based scene representation into an importance-warped micro-buffer, which allows for BRDF importance sampling. The final reflected radiance is computed at each gathering location using the micro-buffers and is then stored in image-space. We can trade quality for speed by reducing the sampling rate of the gathering locations in conjunction with bilateral upsampling. We demonstrate the applicability of our method to interactive global illumination, the simulation of multiple indirect bounces, and to final gathering from photon maps.},
  BOOKTITLE = {SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers},
  PAGES     = {132:1--132:8},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {28},
  ISSUE     = {5},
  ADDRESS   = {Yokohama, Japan},
}
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Engelhardt, Thomas %A Grosch, Thorsten %A Seidel, Hans-Peter %A Kautz, Jan %A Dachsbacher, Carsten %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society %T Micro-rendering for Scalable, Parallel Final Gathering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19BD-C %F EDOC: 520472 %F OTHER: Local-ID: C125675300671F7B-988EEF659FD4FDBCC1257637006845B4-RitschelEGSKD2009:MicroRendering %R 10.1145/1661412.1618478 %D 2009 %B ACM SIGGRAPH Asia 2009 %Z date of event: 2009-12-16 - 2009-12-19 %C Yokohama, Japan %X Recent approaches to global illumination for dynamic scenes achieve interactive <br>frame rates by using coarse approximations to geometry, lighting, or both, <br>which limits scene complexity and rendering quality. High-quality global <br>illumination renderings of complex scenes are still limited to methods based on <br>ray tracing. While conceptually simple, these techniques are computationally <br>expensive. We present an efficient and scalable method to compute global <br>illumination solutions at interactive rates for complex and dynamic scenes. Our <br>method is based on parallel final gathering running entirely on the GPU. At <br>each final gathering location we perform micro-rendering: we traverse and <br>rasterize a hierarchical point-based scene representation into an <br>importance-warped micro-buffer, which allows for BRDF importance sampling. The <br>final reflected radiance is computed at each gathering location using the <br>micro-buffers and is then stored in image-space. We can trade quality for speed <br>by reducing the sampling rate of the gathering locations in conjunction with <br>bilateral upsampling. 
We demonstrate the applicability of our method to <br>interactive global illumination, the simulation of multiple indirect bounces, <br>and to final gathering from photon maps. %B SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers %P 132:1 - 132:8 %I ACM %@ 978-1-60558-858-2 %J ACM Transactions on Graphics %V 28 %N 5 %I Association for Computing Machinery %@ false
Ritschel, T., Grosch, T., and Seidel, H.-P. 2009c. Approximating Dynamic Global Illumination in Image Space. I3D ’09: Proceedings of the 2009 Symposium on Interactive 3D Graphics and Games, ACM.
Abstract
Physically plausible illumination at real-time framerates is often achieved <br>using approximations. One popular example is ambient occlusion (AO), for which <br>very simple and efficient implementations are used extensively in production. <br>Recent methods approximate AO between nearby geometry in screen space (SSAO). <br>The key observation described in this paper is, that screen-space occlusion <br>methods can be used to compute many more types of effects than just occlusion, <br>such as directional shadows and indirect color bleeding. The proposed <br>generalization has only a small overhead compared to classic SSAO, approximates <br>direct and one-bounce light transport in screen space, can be combined with <br>other methods that simulate transport for macro structures and is visually <br>equivalent to SSAO in the worst case without introducing new artifacts. Since <br>our method works in screen space, it does not depend on the geometric <br>complexity. Plausible directional occlusion and indirect lighting effects can <br>be displayed for large and fully dynamic scenes at real-time frame rates.
Export
BibTeX
@inproceedings{Ritschel-et-al_I3D09, TITLE = {Approximating Dynamic Global Illumination in Image Space}, AUTHOR = {Ritschel, Tobias and Grosch, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-60558-429-4}, DOI = {10.1145/1507149.1507161}, LOCALID = {Local-ID: C125675300671F7B-40A583FA98914182C1257583005456DC-RitschelGS2009:SSDO}, PUBLISHER = {ACM}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Physically plausible illumination at real-time framerates is often achieved <br>using approximations. One popular example is ambient occlusion (AO), for which <br>very simple and efficient implementations are used extensively in production. <br>Recent methods approximate AO between nearby geometry in screen space (SSAO). <br>The key observation described in this paper is, that screen-space occlusion <br>methods can be used to compute many more types of effects than just occlusion, <br>such as directional shadows and indirect color bleeding. The proposed <br>generalization has only a small overhead compared to classic SSAO, approximates <br>direct and one-bounce light transport in screen space, can be combined with <br>other methods that simulate transport for macro structures and is visually <br>equivalent to SSAO in the worst case without introducing new artifacts. Since <br>our method works in screen space, it does not depend on the geometric <br>complexity. Plausible directional occlusion and indirect lighting effects can <br>be displayed for large and fully dynamic scenes at real-time frame rates.}, BOOKTITLE = {I3D '09: Proceedings of the 2009 Symposium on Interactive 3D Graphics and Games}, PAGES = {75--82}, ADDRESS = {Boston, MA, USA}, }
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Grosch, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Approximating Dynamic Global Illumination in Image Space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1977-8 %F EDOC: 520488 %F OTHER: Local-ID: C125675300671F7B-40A583FA98914182C1257583005456DC-RitschelGS2009:SSDO %R 10.1145/1507149.1507161 %D 2009 %B 2009 Symposium on Interactive 3D Graphics and Games %Z date of event: 2009-02-27 - 2009-03-01 %C Boston, MA, USA %X Physically plausible illumination at real-time framerates is often achieved <br>using approximations. One popular example is ambient occlusion (AO), for which <br>very simple and efficient implementations are used extensively in production. <br>Recent methods approximate AO between nearby geometry in screen space (SSAO). <br>The key observation described in this paper is, that screen-space occlusion <br>methods can be used to compute many more types of effects than just occlusion, <br>such as directional shadows and indirect color bleeding. The proposed <br>generalization has only a small overhead compared to classic SSAO, approximates <br>direct and one-bounce light transport in screen space, can be combined with <br>other methods that simulate transport for macro structures and is visually <br>equivalent to SSAO in the worst case without introducing new artifacts. Since <br>our method works in screen space, it does not depend on the geometric <br>complexity. Plausible directional occlusion and indirect lighting effects can <br>be displayed for large and fully dynamic scenes at real-time frame rates. %B I3D '09: Proceedings of the 2009 Symposium on Interactive 3D Graphics and Games %P 75 - 82 %I ACM %@ 978-1-60558-429-4
Ritschel, T., Ihrke, M., Frisvad, J.R., Coppens, J., Myszkowski, K., and Seidel, H.-P. 2009d. Temporal Glare: Real-time Dynamic Simulation of the Scattering in the Human Eye. Computer Graphics Forum, Blackwell-Wiley.
Abstract
Glare is a consequence of light scattered within the human eye when looking at <br>bright light sources. This effect can be exploited for tone mapping since <br>adding glare to the depiction of high-dynamic range (HDR) imagery on a <br>low-dynamic range (LDR) medium can dramatically increase perceived contrast. <br>Even though most, if not all, subjects report perceiving glare as a bright <br>pattern that fluctuates in time, up to now it has only been modeled as a static <br>phenomenon. We argue that the temporal properties of glare are a strong means <br>to increase perceived brightness and to produce realistic and attractive <br>renderings of bright light sources. Based on the anatomy of the human eye, we <br>propose a model that enables real-time simulation of dynamic glare on a GPU. <br>This allows an improved depiction of HDR images on LDR media for interactive <br>applications like games, feature films, or even by adding movement to initially <br>static HDR images. By conducting psychophysical studies, we validate that our <br>method improves perceived brightness and that dynamic glare-renderings are <br>often perceived as more attractive depending on the chosen scene.
Export
BibTeX
@inproceedings{Ritschel-et-al_Eurographics09, TITLE = {Temporal Glare: Real-time Dynamic Simulation of the Scattering in the Human Eye}, AUTHOR = {Ritschel, Tobias and Ihrke, Matthias and Frisvad, Jeppe Revall and Coppens, Joris and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2009.01357.x}, LOCALID = {Local-ID: C125675300671F7B-C0AF37EF8D7C4059C125755C00337FD6-Ritschel2009EG}, PUBLISHER = {Blackwell-Wiley}, PUBLISHER = {Blackwell-Wiley}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Glare is a consequence of light scattered within the human eye when looking at <br>bright light sources. This effect can be exploited for tone mapping since <br>adding glare to the depiction of high-dynamic range (HDR) imagery on a <br>low-dynamic range (LDR) medium can dramatically increase perceived contrast. <br>Even though most, if not all, subjects report perceiving glare as a bright <br>pattern that fluctuates in time, up to now it has only been modeled as a static <br>phenomenon. We argue that the temporal properties of glare are a strong means <br>to increase perceived brightness and to produce realistic and attractive <br>renderings of bright light sources. Based on the anatomy of the human eye, we <br>propose a model that enables real-time simulation of dynamic glare on a GPU. <br>This allows an improved depiction of HDR images on LDR media for interactive <br>applications like games, feature films, or even by adding movement to initially <br>static HDR images. By conducting psychophysical studies, we validate that our <br>method improves perceived brightness and that dynamic glare-renderings are <br>often perceived as more attractive depending on the chosen scene.}, BOOKTITLE = {Eurographics 2009}, PAGES = {183--192}, JOURNAL = {Computer Graphics Forum}, VOLUME = {28}, ISSUE = {2}, ADDRESS = {Munich, Germany}, }
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Ihrke, Matthias %A Frisvad, Jeppe Revall %A Coppens, Joris %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Temporal Glare: Real-time Dynamic Simulation of the Scattering in the Human Eye : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19E7-E %F EDOC: 520489 %F OTHER: Local-ID: C125675300671F7B-C0AF37EF8D7C4059C125755C00337FD6-Ritschel2009EG %R 10.1111/j.1467-8659.2009.01357.x %D 2009 %Z Review method: peer-reviewed %B 30th Annual Conference of the European Association for Computer Graphics %Z date of event: 2009-03-30 - 2009-04-03 %C Munich, Germany %X Glare is a consequence of light scattered within the human eye when looking at <br>bright light sources. This effect can be exploited for tone mapping since <br>adding glare to the depiction of high-dynamic range (HDR) imagery on a <br>low-dynamic range (LDR) medium can dramatically increase perceived contrast. <br>Even though most, if not all, subjects report perceiving glare as a bright <br>pattern that fluctuates in time, up to now it has only been modeled as a static <br>phenomenon. We argue that the temporal properties of glare are a strong means <br>to increase perceived brightness and to produce realistic and attractive <br>renderings of bright light sources. Based on the anatomy of the human eye, we <br>propose a model that enables real-time simulation of dynamic glare on a GPU. <br>This allows an improved depiction of HDR images on LDR media for interactive <br>applications like games, feature films, or even by adding movement to initially <br>static HDR images. 
By conducting psychophysical studies, we validate that our <br>method improves perceived brightness and that dynamic glare-renderings are <br>often perceived as more attractive depending on the chosen scene. %B Eurographics 2009 %P 183 - 192 %I Blackwell-Wiley %J Computer Graphics Forum %V 28 %N 2 %I Blackwell-Wiley %@ false
Okabe, M., Anjyo, K., Igarashi, T., and Seidel, H.-P. 2009. Animating Pictures of Fluid using Video Examples. Computer Graphics Forum, Blackwell-Wiley.
Export
BibTeX
@inproceedings{Okabe-et-al_Eurographics09, TITLE = {Animating Pictures of Fluid using Video Examples}, AUTHOR = {Okabe, Makoto and Anjyo, Ken and Igarashi, Takeo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2009.01408.x}, LOCALID = {Local-ID: C125675300671F7B-D71B209D698AC6A9C125755B0048203A-Okabe2009}, PUBLISHER = {Blackwell-Wiley}, PUBLISHER = {Blackwell-Wiley}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {Eurographics 2009}, PAGES = {677--686}, JOURNAL = {Computer Graphics Forum}, VOLUME = {28}, ISSUE = {2}, ADDRESS = {Munich, Germany}, }
Endnote
%0 Conference Proceedings %A Okabe, Makoto %A Anjyo, Ken %A Igarashi, Takeo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Animating Pictures of Fluid using Video Examples : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1970-5 %F EDOC: 520501 %F OTHER: Local-ID: C125675300671F7B-D71B209D698AC6A9C125755B0048203A-Okabe2009 %R 10.1111/j.1467-8659.2009.01408.x %D 2009 %Z Review method: peer-reviewed %B 30th Annual Conference of the European Association for Computer Graphics %Z date of event: 2009-03-30 - 2009-04-03 %C Munich, Germany %B Eurographics 2009 %P 677 - 686 %I Blackwell-Wiley %J Computer Graphics Forum %V 28 %N 2 %I Blackwell-Wiley %@ false
Müller, M., Baak, A., and Seidel, H.-P. 2009. Efficient and Robust Annotation of Motion Capture Data. SCA ’09: Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, ACM.
Export
BibTeX
@inproceedings{Muller-et-al_SCA09, TITLE = {Efficient and Robust Annotation of Motion Capture Data}, AUTHOR = {M{\"u}ller, Meinard and Baak, Andreas and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-60558-610-6}, DOI = {10.1145/1599470.1599473}, LOCALID = {Local-ID: C125675300671F7B-1910BB5410A2328DC125767B0040FD32-MuellerBS09_MocapAnnotation_SCA}, PUBLISHER = {ACM}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {SCA '09: Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation}, EDITOR = {Grinspun, Eitan and Hodgins, Jessica}, PAGES = {17--26}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A M&#252;ller, Meinard %A Baak, Andreas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient and Robust Annotation of Motion Capture Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19A3-4 %F EDOC: 520461 %F OTHER: Local-ID: C125675300671F7B-1910BB5410A2328DC125767B0040FD32-MuellerBS09_MocapAnnotation_SCA %R 10.1145/1599470.1599473 %D 2009 %B 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation %Z date of event: 2009-08-01 - 2009-08-02 %C New Orleans, LA, USA %B SCA '09: Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation %E Grinspun, Eitan; Hodgins, Jessica %P 17 - 26 %I ACM %@ 978-1-60558-610-6
Lee, S., Eisemann, E., and Seidel, H.-P. 2009. Depth-of-Field Rendering with Multiview Synthesis. ACM Transactions on Graphics, ACM.
Export
BibTeX
@inproceedings{Lee-et-al_SIGGRAPH09, TITLE = {Depth-of-Field Rendering with Multiview Synthesis}, AUTHOR = {Lee, Sungkil and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-60558-858-2}, DOI = {10.1145/1661412.1618480}, LOCALID = {Local-ID: C125675300671F7B-B932B9516E673FD6C12576C5003F526C-Lee2009z}, PUBLISHER = {ACM}, PUBLISHER = {Association for Computing Machinery}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers}, PAGES = {134:1--134:6}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {28}, ISSUE = {5}, ADDRESS = {Yokohama, Japan}, }
Endnote
%0 Conference Proceedings %A Lee, Sungkil %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Depth-of-Field Rendering with Multiview Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19A0-A %F EDOC: 520453 %F OTHER: Local-ID: C125675300671F7B-B932B9516E673FD6C12576C5003F526C-Lee2009z %R 10.1145/1661412.1618480 %D 2009 %B ACM SIGGRAPH Asia 2009 %Z date of event: 2009-12-16 - 2009-12-19 %C Yokohama, Japan %B SIGGRAPH Asia '09: ACM SIGGRAPH Asia 2009 papers %P 134:1 - 134:6 %I ACM %@ 978-1-60558-858-2 %J ACM Transactions on Graphics %V 28 %N 5 %I Association for Computing Machinery %@ false
Lasowski, R., Tevs, A., Seidel, H.-P., and Wand, M. 2009. A Probabilistic Framework for Partial Intrinsic Symmetries in Geometric Data. 2009 IEEE 12th International Conference on Computer Vision (ICCV 2009), IEEE Computer Society.
Export
BibTeX
@inproceedings{Lasowski-et-al_ICCV09, TITLE = {A Probabilistic Framework for Partial Intrinsic Symmetries in Geometric Data}, AUTHOR = {Lasowski, Ruxandra and Tevs, Art and Seidel, Hans-Peter and Wand, Michael}, LANGUAGE = {eng}, ISBN = {978-1-4244-4420-5}, DOI = {10.1109/ICCV.2009.5459356}, LOCALID = {Local-ID: C125675300671F7B-4DE5F9E2CBADD549C125760E0041AC87-LaTeWaSe_ICCV09}, PUBLISHER = {IEEE Computer Society}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {2009 IEEE 12th International Conference on Computer Vision (ICCV 2009)}, EDITOR = {Cipolla, Roberto and Hebert, Martial and Tang, Xiaoou and Yokoya, Naokazu}, PAGES = {963--970}, ADDRESS = {Kyoto, Japan}, }
Endnote
%0 Conference Proceedings %A Lasowski, Ruxandra %A Tevs, Art %A Seidel, Hans-Peter %A Wand, Michael %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Probabilistic Framework for Partial Intrinsic Symmetries in Geometric Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-197A-2 %F EDOC: 520478 %F OTHER: Local-ID: C125675300671F7B-4DE5F9E2CBADD549C125760E0041AC87-LaTeWaSe_ICCV09 %R 10.1109/ICCV.2009.5459356 %D 2009 %B 2009 IEEE 12th International Conference on Computer Vision %Z date of event: 2009-09-29 - 2009-10-02 %C Kyoto, Japan %B 2009 IEEE 12th International Conference on Computer Vision %E Cipolla, Roberto; Hebert, Martial; Tang, Xiaoou; Yokoya, Naokazu %P 963 - 970 %I IEEE Computer Society %@ 978-1-4244-4420-5
Kurz, C., Thormählen, T., and Seidel, H.-P. 2009a. Scene-aware Video Stabilization by Visual Fixation. The 6th European Conference for Visual Media Production (CVMP2009), IEEE Computer Society.
Abstract
Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes.
Export
BibTeX
@inproceedings{Kurz2009b, TITLE = {Scene-aware Video Stabilization by Visual Fixation}, AUTHOR = {Kurz, Christian and Throm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-7695-3893-8/09}, DOI = {DOI 10.1109/CVMP.2009.9}, LOCALID = {Local-ID: C125675300671F7B-D52A53EB54540085C12576C500527EA0-Kurz2009b}, PUBLISHER = {IEEE Computer Society}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes.}, BOOKTITLE = {The 6th European Conference for Visual Media Production (CVMP2009)}, PAGES = {XX--XX}, }
Endnote
%0 Conference Proceedings %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Scene-aware Video Stabilization by Visual Fixation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19DD-6 %F EDOC: 520451 %R 10.1109/CVMP.2009.9 %F OTHER: Local-ID: C125675300671F7B-D52A53EB54540085C12576C500527EA0-Kurz2009b %I IEEE Computer Society %D 2009 %B The 6th European Conference for Visual Media Production (CVMP 2009) %Z date of event: 2009-11-12 - 2009-11-13 %C London, UK %X Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes. 
%B The 6th European Conference for Visual Media Production (CVMP2009) %P XX - XX %I IEEE Computer Society %@ 978-0-7695-3893-8/09
Kurz, C., Thormählen, T., Rosenhahn, B., and Seidel, H.-P. 2009b. Exploiting Mutual Camera Visibility in Multi-camera Motion Estimation. Advances in Visual Computing (ISVC 2009), Springer.
Abstract
This paper addresses the estimation of camera motion and 3D reconstruction from <br>image sequences for multiple independently moving cameras. If multiple moving <br>cameras record the same scene, a camera is often visible in another camera's <br>field of view. This poses a constraint on the position of the observed camera, <br>which can be included into the conjoined optimization process. The paper <br>contains the following contributions: Firstly, a fully automatic detection and <br>tracking algorithm for the position of a moving camera in the image sequence of <br>another moving camera is presented. Secondly, a sparse bundle adjustment <br>algorithm is introduced, which includes this additional constraint on the <br>position of the tracked camera. Since the additional constraints minimize the <br>geometric error at the boundary of the reconstructed volume, the total <br>reconstruction accuracy can be improved significantly. Experiments with <br>synthetic and challenging real world scenes show the improved performance of <br>our fully automatic method.
Export
BibTeX
@inproceedings{Kurz-et-al_ISVC09, TITLE = {Exploiting Mutual Camera Visibility in Multi-camera Motion Estimation}, AUTHOR = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Rosenhahn, Bodo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-642-10330-8}, URL = {http://www.springerlink.com/content/07613462tn202238/}, DOI = {10.1007/978-3-642-10331-5_37}, LOCALID = {Local-ID: C125675300671F7B-B738005696C44FF6C12576C5004EF034-Kurz2009a}, PUBLISHER = {Springer}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {This paper addresses the estimation of camera motion and 3D reconstruction from <br>image sequences for multiple independently moving cameras. If multiple moving <br>cameras record the same scene, a camera is often visible in another camera's <br>field of view. This poses a constraint on the position of the observed camera, <br>which can be included into the conjoined optimization process. The paper <br>contains the following contributions: Firstly, a fully automatic detection and <br>tracking algorithm for the position of a moving camera in the image sequence of <br>another moving camera is presented. Secondly, a sparse bundle adjustment <br>algorithm is introduced, which includes this additional constraint on the <br>position of the tracked camera. Since the additional constraints minimize the <br>geometric error at the boundary of the reconstructed volume, the total <br>reconstruction accuracy can be improved significantly. Experiments with <br>synthetic and challenging real world scenes show the improved performance of <br>our fully automatic method.}, BOOKTITLE = {Advances in Visual Computing (ISVC 2009)}, EDITOR = {Bebis, George and Boyle, Richard D. and Parvin, Bahram and Koracin, Darko and Kuno, Yoshinori and Wang, Junxian and Pajarola, Renato and Lindstrom, Peter and Hinkenjann, Andr{\'e} and Encarna{\c c}{\~a}o, L. Miguel and Silva, Cl{\'a}udio T. and Coming, Daniel S.}, PAGES = {391--402}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {5875}, ADDRESS = {Las Vegas, NV, USA}, }
Endnote
%0 Conference Proceedings %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Mutual Camera Visibility in Multi-camera Motion Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19A9-7 %F EDOC: 520450 %R 10.1007/978-3-642-10331-5_37 %U http://www.springerlink.com/content/07613462tn202238/ %F OTHER: Local-ID: C125675300671F7B-B738005696C44FF6C12576C5004EF034-Kurz2009a %D 2009 %B 5th International Symposium on Advances in Visual Computing %Z date of event: 2009-11-30 - 2009-12-02 %C Las Vegas, NV, USA %X This paper addresses the estimation of camera motion and 3D reconstruction from <br>image sequences for multiple independently moving cameras. If multiple moving <br>cameras record the same scene, a camera is often visible in another camera's <br>field of view. This poses a constraint on the position of the observed camera, <br>which can be included into the conjoined optimization process. The paper <br>contains the following contributions: Firstly, a fully automatic detection and <br>tracking algorithm for the position of a moving camera in the image sequence of <br>another moving camera is presented. Secondly, a sparse bundle adjustment <br>algorithm is introduced, which includes this additional constraint on the <br>position of the tracked camera. Since the additional constraints minimize the <br>geometric error at the boundary of the reconstructed volume, the total <br>reconstruction accuracy can be improved significantly. Experiments with <br>synthetic and challenging real world scenes show the improved performance of <br>our fully automatic method. 
%B Advances in Visual Computing %E Bebis, George; Boyle, Richard D.; Parvin, Bahram; Koracin, Darko; Kuno, Yoshinori; Wang, Junxian; Pajarola, Renato; Lindstrom, Peter; Hinkenjann, Andr&#233;; Encarna&#231;&#227;o, Miguel L.; Silva, Cl&#225;udio T.; Coming, Daniel S. %P 391 - 402 %I Springer %@ 978-3-642-10330-8 %B Lecture Notes in Computer Science %N 5875 %U https://rdcu.be/dJkjO
Kosov, S., Thormählen, T., and Seidel, H.-P. 2009a. Accurate Real-time Disparity Estimation with Variational Methods. Advances in Visual Computing, Springer.
Export
BibTeX
@inproceedings{DBLP:conf/isvc/KosovTS09, TITLE = {Accurate Real-time Disparity Estimation with Variational Methods}, AUTHOR = {Kosov, Sergey and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-642-10330-8}, DOI = {10.1007/978-3-642-10331-5_74}, PUBLISHER = {Springer}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {Advances in Visual Computing}, EDITOR = {Bebis, George and Boyle, Richard D. and Parvin, Bahram and Koracin, Darko and Kuno, Yoshinori and Wang, Junxian and Pajarola, Renato and Lindstrom, Peter and Hinkenjann, Andr{\'e} and Encarna{\c c}{\~a}o, L. Miguel and Silva, Cl{\'a}udio T. and Coming, Daniel S.}, PAGES = {796--807}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {5875}, ADDRESS = {Las Vegas, NV, USA}, }
Endnote
%0 Conference Proceedings %A Kosov, Sergey %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Accurate Real-time Disparity Estimation with Variational Methods : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5879-7 %R 10.1007/978-3-642-10331-5_74 %D 2009 %B 5th International Symposium on Advances in Visual Computing %Z date of event: 2009-11-30 - 2009-12-02 %C Las Vegas, NV, USA %B Advances in Visual Computing %E Bebis, George; Boyle, Richard D.; Parvin, Bahram; Koracin, Darko; Kuno, Yoshinori; Wang, Junxian; Pajarola, Renato; Lindstrom, Peter; Hinkenjann, Andr&#233;; Encarna&#231;&#227;o, L. Miguel; Silva, Cl&#225;udio T.; Coming, Daniel S. %P 796 - 807 %I Springer %@ 978-3-642-10330-8 %B Lecture Notes in Computer Science %N 5875 %U https://rdcu.be/dJevs
Kosov, S., Scherbaum, K., Faber, K., Thormählen, T., and Seidel, H.-P. 2009b. Rapid Stereo-vision Enhanced Face Detection. Proceedings of the 16th IEEE International Conference on Image Processing (ICIP 2009), IEEE.
Export
BibTeX
@inproceedings{Kosov-et-al_ICIP09, TITLE = {Rapid Stereo-vision Enhanced Face Detection}, AUTHOR = {Kosov, Sergey and Scherbaum, Kristina and Faber, Kamil and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4244-5655-0}, DOI = {10.1109/ICIP.2009.5413675}, LOCALID = {Local-ID: C125675300671F7B-453DE8CEB1A7509EC1257664004A76F7-KosovICIP2009}, PUBLISHER = {IEEE}, YEAR = {2009}, DATE = {2009}, BOOKTITLE = {Proceedings of the 16th IEEE International Conference on Image Processing (ICIP 2009)}, PAGES = {1221--1224}, ADDRESS = {Cairo, Egypt}, }
Endnote
%0 Conference Proceedings %A Kosov, Sergey %A Scherbaum, Kristina %A Faber, Kamil %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Rapid Stereo-vision Enhanced Face Detection : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19CF-4 %F EDOC: 520473 %F OTHER: Local-ID: C125675300671F7B-453DE8CEB1A7509EC1257664004A76F7-KosovICIP2009 %R 10.1109/ICIP.2009.5413675 %D 2009 %B 16th IEEE International Conference on Image Processing %Z date of event: 2009-11-07 - 2009-11-10 %C Cairo, Egypt %B Proceedings of the 16th IEEE International Conference on Image Processing %P 1221 - 1224 %I IEEE %@ 978-1-4244-5655-0
Kerber, J., Tevs, A., Zayer, R., Belyaev, A., and Seidel, H.-P. 2009. Feature Sensitive Bas Relief Generation. IEEE International Conference on Shape Modeling and Applications Proceedings, IEEE Computer Society Press.
Abstract
Among all forms of sculpture, bas-relief is arguably the closest to painting. <br>Although inherently a two dimensional sculpture, a bas-relief suggests a visual <br>spatial extension of the scene in depth through the combination of composition, <br>perspective, and shading. Most recently, there have been significant results on <br>digital bas-relief generation but many of the existing techniques may wash out <br>high level surface detail during the compression process. The primary goal of <br>this work is to address the problem of fine features by tailoring a filtering <br>technique that achieves good compression without compromising the quality of <br>surface details. As a secondary application we explore the generation of <br>artistic relief which mimic cubism in painting and we show how it could be used <br>for generating Picasso like portraits.
Export
BibTeX
@inproceedings{Kerber-et-al_SMI08, TITLE = {Feature Sensitive Bas Relief Generation}, AUTHOR = {Kerber, Jens and Tevs, Art and Zayer, Rhaleb and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4244-4068-9}, URL = {http://www.mpi-inf.mpg.de/~kerber/publications/Jens_Kerber_SMI_2009.pdf}, DOI = {10.1109/SMI.2009.5170176}, LOCALID = {Local-ID: C125675300671F7B-8F2AFB7CFC9176DFC12575F6003BAF29-Kerber_SMI2009}, PUBLISHER = {IEEE Computer Society Press}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Among all forms of sculpture, bas-relief is arguably the closest to painting. <br>Although inherently a two dimensional sculpture, a bas-relief suggests a visual <br>spatial extension of the scene in depth through the combination of composition, <br>perspective, and shading. Most recently, there have been significant results on <br>digital bas-relief generation but many of the existing techniques may wash out <br>high level surface detail during the compression process. The primary goal of <br>this work is to address the problem of fine features by tailoring a filtering <br>technique that achieves good compression without compromising the quality of <br>surface details. As a secondary application we explore the generation of <br>artistic relief which mimic cubism in painting and we show how it could be used <br>for generating Picasso like portraits.}, BOOKTITLE = {IEEE International Conference on Shape Modeling and Applications Proceedings}, EDITOR = {Yong, Jun-Hai and Spagnuolo, Michela and Wang, Wenping}, PAGES = {148--154}, ADDRESS = {Beijing, China}, }
Endnote
%0 Conference Proceedings %A Kerber, Jens %A Tevs, Art %A Zayer, Rhaleb %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Sensitive Bas Relief Generation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19AD-0 %F EDOC: 520477 %U http://www.mpi-inf.mpg.de/~kerber/publications/Jens_Kerber_SMI_2009.pdf %F OTHER: Local-ID: C125675300671F7B-8F2AFB7CFC9176DFC12575F6003BAF29-Kerber_SMI2009 %R 10.1109/SMI.2009.5170176 %D 2009 %B 2009 IEEE International Conference on Shape Modeling and Applications %Z date of event: 2009-06-26 - 2009-06-28 %C Beijing, China %X Among all forms of sculpture, bas-relief is arguably the closest to painting. <br>Although inherently a two dimensional sculpture, a bas-relief suggests a visual <br>spatial extension of the scene in depth through the combination of composition, <br>perspective, and shading. Most recently, there have been significant results on <br>digital bas-relief generation but many of the existing techniques may wash out <br>high level surface detail during the compression process. The primary goal of <br>this work is to address the problem of fine features by tailoring a filtering <br>technique that achieves good compression without compromising the quality of <br>surface details. As a secondary application we explore the generation of <br>artistic relief which mimic cubism in painting and we show how it could be used <br>for generating Picasso like portraits. 
%B IEEE International Conference on Shape Modeling and Applications Proceedings %E Yong, Jun-Hai; Spagnuolo, Michela; Wang, Wenping %P 148 - 154 %I IEEE Computer Society Press %@ 978-1-4244-4068-9
Ihrke, M., Ritschel, T., Smith, K., Grosch, T., Myszkowski, K., and Seidel, H.-P. 2009. A Perceptual Evaluation of 3D Unsharp Masking. Human Vision and Electronic Imaging XIV, IS&T/SPIE’s 21st Annual Symposium on Electronic Imaging, SPIE.
Abstract
Much research has gone into developing methods for enhancing the contrast of <br>displayed 3D scenes. In the<br>current study, we investigated the perceptual impact of an algorithm recently <br>proposed by Ritschel et al.1 that<br>provides a general technique for enhancing the perceived contrast in <br>synthesized scenes. Their algorithm extends<br>traditional image-based Unsharp Masking to a 3D scene, achieving a <br>scene-coherent enhancement. We conducted<br>a standardized perceptual experiment to test the proposition that a 3D unsharp <br>enhanced scene was superior to<br>the original scene in terms of perceived contrast and preference. Furthermore, <br>the impact of different settings<br>of the algorithm’s main parameters enhancement-strength (λ) and gradient size <br>(σ) were studied in order to<br>provide an estimate of a reasonable parameter space for the method. All <br>participants preferred a clearly visible<br>enhancement over the original, non-enhanced scenes and the setting for <br>objectionable enhancement was far<br>above the preferred settings. The effect of the gradient size σ was negligible. <br>The general pattern found for<br>the parameters provides a useful guideline for designers when making use of 3D <br>Unsharp Masking: as a rule of<br>thumb they can easily determine the strength for which they start to perceive <br>an enhancement and use twice<br>this value for a good effect. Since the value for objectionable results was <br>twice as large again, artifacts should<br>not impose restrictions on the applicability of this rule.
Export
BibTeX
@inproceedings{Ihrke-et-al_SPIE09, TITLE = {A Perceptual Evaluation of {3D} Unsharp Masking}, AUTHOR = {Ihrke, Matthias and Ritschel, Tobias and Smith, Kaleigh and Grosch, Thorsten and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1117/12.809026}, LOCALID = {Local-ID: C125675300671F7B-5AB79508CF9875C4C125755C0035BB4C-Ihrke2009SPIE}, PUBLISHER = {SPIE}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Much research has gone into developing methods for enhancing the contrast of <br>displayed 3D scenes. In the<br>current study, we investigated the perceptual impact of an algorithm recently <br>proposed by Ritschel et al.1 that<br>provides a general technique for enhancing the perceived contrast in <br>synthesized scenes. Their algorithm extends<br>traditional image-based Unsharp Masking to a 3D scene, achieving a <br>scene-coherent enhancement. We conducted<br>a standardized perceptual experiment to test the proposition that a 3D unsharp <br>enhanced scene was superior to<br>the original scene in terms of perceived contrast and preference. Furthermore, <br>the impact of different settings<br>of the algorithm{\textquoteright}s main parameters enhancement-strength ($\lambda$) and gradient size <br>($\sigma$) were studied in order to<br>provide an estimate of a reasonable parameter space for the method. All <br>participants preferred a clearly visible<br>enhancement over the original, non-enhanced scenes and the setting for <br>objectionable enhancement was far<br>above the preferred settings. The effect of the gradient size $\sigma$ was negligible. <br>The general pattern found for<br>the parameters provides a useful guideline for designers when making use of 3D <br>Unsharp Masking: as a rule of<br>thumb they can easily determine the strength for which they start to perceive <br>an enhancement and use twice<br>this value for a good effect. 
Since the value for objectionable results was <br>twice as large again, artifacts should<br>not impose restrictions on the applicability of this rule.}, BOOKTITLE = {Human Vision and Electronic Imaging XIV, IS \&T SPIE's 21st Annual Symposium on Electronic Imaging}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.}, PAGES = {72400R--1-12}, SERIES = {Annual Symposium on Electronic Imaging}, VOLUME = {7240}, ADDRESS = {San Jose, CA, USA}, }
Endnote
%0 Conference Proceedings %A Ihrke, Matthias %A Ritschel, Tobias %A Smith, Kaleigh %A Grosch, Thorsten %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Evaluation of 3D Unsharp Masking : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1975-C %F EDOC: 520494 %R 10.1117/12.809026 %F OTHER: Local-ID: C125675300671F7B-5AB79508CF9875C4C125755C0035BB4C-Ihrke2009SPIE %D 2009 %B 21st Annual Symposium on Electronic Imaging %Z date of event: 2009-01-19 - 2009-01-22 %C San Jose, CA, USA %X Much research has gone into developing methods for enhancing the contrast of <br>displayed 3D scenes. In the<br>current study, we investigated the perceptual impact of an algorithm recently <br>proposed by Ritschel et al.1 that<br>provides a general technique for enhancing the perceived contrast in <br>synthesized scenes. Their algorithm extends<br>traditional image-based Unsharp Masking to a 3D scene, achieving a <br>scene-coherent enhancement. We conducted<br>a standardized perceptual experiment to test the proposition that a 3D unsharp <br>enhanced scene was superior to<br>the original scene in terms of perceived contrast and preference. Furthermore, <br>the impact of different settings<br>of the algorithm&#8217;s main parameters enhancement-strength (&#955;) and gradient size <br>(&#963;) were studied in order to<br>provide an estimate of a reasonable parameter space for the method. All <br>participants preferred a clearly visible<br>enhancement over the original, non-enhanced scenes and the setting for <br>objectionable enhancement was far<br>above the preferred settings. 
The effect of the gradient size &#963; was negligible. <br>The general pattern found for<br>the parameters provides a useful guideline for designers when making use of 3D <br>Unsharp Masking: as a rule of<br>thumb they can easily determine the strength for which they start to perceive <br>an enhancement and use twice<br>this value for a good effect. Since the value for objectionable results was <br>twice as large again, artifacts should<br>not impose restrictions on the applicability of this rule. %B Human Vision and Electronic Imaging XIV, IS&T/SPIE's 21st Annual Symposium on Electronic Imaging %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 72400R - 1-12 %I SPIE %B Annual Symposium on Electronic Imaging %N 7240
Hullin, M.B., Ajdin, B., Hanika, J., Seidel, H.-P., Kautz, J., and Lensch, H.P.A. 2009. Acquisition and analysis of bispectral bidirectional reflectance distribution functions. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In fluorescent materials, energy from a certain band of incident wavelengths is reflected or reradiated at larger wavelengths, i.e. with lower energy per photon. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reflectance measurements of fluorescent materials have been available so far. In this paper, we develop the concept of a bispectral BRDF, which extends the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths. Using a bidirectional and bispectral measurement setup, we acquire reflectance data of a variety of fluorescent materials, including vehicle paints, paper and fabric. We show bispectral renderings of the measured data and compare them with reduced versions of the bispectral BRDF, including the traditional RGB vector valued BRDF. Principal component analysis of the measured data reveals that for some materials the fluorescent reradiation spectrum changes considerably over the range of directions. We further show that bispectral BRDFs can be efficiently acquired using an acquisition strategy based on principal components.
Export
BibTeX
@techreport{HullinAjdinHanikaSeidelKautzLensch2009, TITLE = {Acquisition and analysis of bispectral bidirectional reflectance distribution functions}, AUTHOR = {Hullin, Matthias B. and Ajdin, Boris and Hanika, Johannes and Seidel, Hans-Peter and Kautz, Jan and Lensch, Hendrik P. A.}, LANGUAGE = {eng}, URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-001}, NUMBER = {MPI-I-2009-4-001}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {In fluorescent materials, energy from a certain band of incident wavelengths is reflected or reradiated at larger wavelengths, i.e. with lower energy per photon. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reflectance measurements of fluorescent materials have been available so far. In this paper, we develop the concept of a bispectral BRDF, which extends the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths. Using a bidirectional and bispectral measurement setup, we acquire reflectance data of a variety of fluorescent materials, including vehicle paints, paper and fabric. We show bispectral renderings of the measured data and compare them with reduced versions of the bispectral BRDF, including the traditional RGB vector valued BRDF. Principal component analysis of the measured data reveals that for some materials the fluorescent reradiation spectrum changes considerably over the range of directions. We further show that bispectral BRDFs can be efficiently acquired using an acquisition strategy based on principal components.}, TYPE = {Research Report / Max-Planck-Institut f&#252;r Informatik}, }
Endnote
%0 Report %A Hullin, Matthias B. %A Ajdin, Boris %A Hanika, Johannes %A Seidel, Hans-Peter %A Kautz, Jan %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Acquisition and analysis of bispectral bidirectional reflectance distribution functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6671-4 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-001 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2009 %P 25 p. %X In fluorescent materials, energy from a certain band of incident wavelengths is reflected or reradiated at larger wavelengths, i.e. with lower energy per photon. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reflectance measurements of fluorescent materials have been available so far. In this paper, we develop the concept of a bispectral BRDF, which extends the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths. Using a bidirectional and bispectral measurement setup, we acquire reflectance data of a variety of fluorescent materials, including vehicle paints, paper and fabric. We show bispectral renderings of the measured data and compare them with reduced versions of the bispectral BRDF, including the traditional RGB vector valued BRDF. Principal component analysis of the measured data reveals that for some materials the fluorescent reradiation spectrum changes considerably over the range of directions. 
We further show that bispectral BRDFs can be efficiently acquired using an acquisition strategy based on principal components. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Herzog, R., Myszkowski, K., and Seidel, H.-P. 2009. Anisotropic Radiance-cache Splatting for Efficiently Computing High-quality Global Illumination with Lightcuts. Computer Graphics Forum, Blackwell-Wiley.
Abstract
Computing global illumination in complex scenes is even with todays <br>computational power a demanding task.<br>In this work we propose a novel irradiance caching scheme that combines the <br>advantages of two state-of-the-art algorithms for high-quality global <br>illumination rendering: \emph{lightcuts}, an adaptive and hierarchical <br>instant-radiosity based algorithm and the widely used (ir)radiance caching <br>algorithm for sparse sampling and interpolation of (ir)radiance in object <br>space. Our adaptive radiance caching algorithm is based on anisotropic cache <br>splatting, which adapts the cache footprints not only to the magnitude of the <br>illumination gradient computed with lightcuts but also to its orientation <br>allowing larger interpolation errors along the direction of coherent <br>illumination while reducing the error along the illumination gradient. Since <br>lightcuts computes the direct and indirect lighting seamlessly, we use a <br>two-layer radiance cache, to store and control the interpolation of direct and <br>indirect lighting individually with different error criteria. In multiple <br>iterations our method detects cache interpolation errors above the visibility <br>threshold of a pixel and reduces the anisotropic cache footprints accordingly. <br>We achieve significantly better image quality while also speeding up the <br>computation costs by one to two orders of magnitude with respect to the <br>well-known photon mapping with (ir)radiance caching procedure.
Export
BibTeX
@inproceedings{Herzog-et-al_Eurographics09, TITLE = {Anisotropic Radiance-cache Splatting for Efficiently Computing High-quality Global Illumination with Lightcuts}, AUTHOR = {Herzog, Robert and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2009.01365.x}, LOCALID = {Local-ID: C125675300671F7B-56F88E3A387C52D6C12575550034E328-Herzog2008}, PUBLISHER = {Blackwell-Wiley}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Computing global illumination in complex scenes is even with todays <br>computational power a demanding task.<br>In this work we propose a novel irradiance caching scheme that combines the <br>advantages of two state-of-the-art algorithms for high-quality global <br>illumination rendering: \emph{lightcuts}, an adaptive and hierarchical <br>instant-radiosity based algorithm and the widely used (ir)radiance caching <br>algorithm for sparse sampling and interpolation of (ir)radiance in object <br>space. Our adaptive radiance caching algorithm is based on anisotropic cache <br>splatting, which adapts the cache footprints not only to the magnitude of the <br>illumination gradient computed with lightcuts but also to its orientation <br>allowing larger interpolation errors along the direction of coherent <br>illumination while reducing the error along the illumination gradient. Since <br>lightcuts computes the direct and indirect lighting seamlessly, we use a <br>two-layer radiance cache, to store and control the interpolation of direct and <br>indirect lighting individually with different error criteria. In multiple <br>iterations our method detects cache interpolation errors above the visibility <br>threshold of a pixel and reduces the anisotropic cache footprints accordingly. 
<br>We achieve significantly better image quality while also speeding up the <br>computation costs by one to two orders of magnitude with respect to the <br>well-known photon mapping with (ir)radiance caching procedure.}, BOOKTITLE = {Eurographics 2009}, EDITOR = {Stamminger, Marc and Dutr{\'e}, Philip}, PAGES = {259--268}, JOURNAL = {Computer Graphics Forum}, VOLUME = {28}, ISSUE = {2}, ADDRESS = {M{\"u}nchen, Germany}, }
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Anisotropic Radiance-cache Splatting for Efficiently Computing High-quality Global Illumination with Lightcuts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1972-1 %F EDOC: 520500 %F OTHER: Local-ID: C125675300671F7B-56F88E3A387C52D6C12575550034E328-Herzog2008 %R 10.1111/j.1467-8659.2009.01365.x %D 2009 %B 30th Annual Conference of the European Association for Computer Graphics %Z date of event: 2009-03-30 - 2009-04-03 %C M&#252;nchen, Germany %X Computing global illumination in complex scenes is even with todays <br>computational power a demanding task.<br>In this work we propose a novel irradiance caching scheme that combines the <br>advantages of two state-of-the-art algorithms for high-quality global <br>illumination rendering: \emph{lightcuts}, an adaptive and hierarchical <br>instant-radiosity based algorithm and the widely used (ir)radiance caching <br>algorithm for sparse sampling and interpolation of (ir)radiance in object <br>space. Our adaptive radiance caching algorithm is based on anisotropic cache <br>splatting, which adapts the cache footprints not only to the magnitude of the <br>illumination gradient computed with lightcuts but also to its orientation <br>allowing larger interpolation errors along the direction of coherent <br>illumination while reducing the error along the illumination gradient. Since <br>lightcuts computes the direct and indirect lighting seamlessly, we use a <br>two-layer radiance cache, to store and control the interpolation of direct and <br>indirect lighting individually with different error criteria. 
In multiple <br>iterations our method detects cache interpolation errors above the visibility <br>threshold of a pixel and reduces the anisotropic cache footprints accordingly. <br>We achieve significantly better image quality while also speeding up the <br>computation costs by one to two orders of magnitude with respect to the <br>well-known photon mapping with (ir)radiance caching procedure. %B Eurographics 2009 %E Stamminger, Marc; Dutr&#233;, Philip %P 259 - 268 %I Blackwell-Wiley %J Computer Graphics Forum %V 28 %N 2 %I Blackwell-Wiley %@ false
Havran, V., Zajac, J., Drahokoupil, J., and Seidel, H.-P. 2009. MPI Informatics building model as data for your research. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this report we describe the MPI Informatics building model that provides the data of the Max-Planck-Institut f\"{u}r Informatik (MPII) building. We present our motivation for this work and its relationship to reproducibility of a scientific research. We describe the dataset acquisition and creation including geometry, luminaires, surface reflectances, reference photographs etc. needed to use this model in testing of algorithms. The created dataset can be used in computer graphics and beyond, in particular in global illumination algorithms with focus on realistic and predictive image synthesis. Outside of computer graphics, it can be used as general source of real world geometry with an existing counterpart and hence also suitable for computer vision.
Export
BibTeX
@techreport{HavranZajacDrahokoupilSeidel2009,
  TITLE       = {{MPI} Informatics building model as data for your research},
  AUTHOR      = {Havran, Vlastimil and Zajac, Jozef and Drahokoupil, Jiri and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-004},
  NUMBER      = {MPI-I-2009-4-004},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2009},
  DATE        = {2009},
  ABSTRACT    = {In this report we describe the MPI Informatics building model that provides the data of the Max-Planck-Institut f{\"u}r Informatik (MPII) building. We present our motivation for this work and its relationship to reproducibility of a scientific research. We describe the dataset acquisition and creation including geometry, luminaires, surface reflectances, reference photographs etc. needed to use this model in testing of algorithms. The created dataset can be used in computer graphics and beyond, in particular in global illumination algorithms with focus on realistic and predictive image synthesis. Outside of computer graphics, it can be used as general source of real world geometry with an existing counterpart and hence also suitable for computer vision.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Havran, Vlastimil %A Zajac, Jozef %A Drahokoupil, Jiri %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T MPI Informatics building model as data for your research : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6665-F %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-004 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2009 %P 113 p. %X In this report we describe the MPI Informatics building model that provides the data of the Max-Planck-Institut f\"{u}r Informatik (MPII) building. We present our motivation for this work and its relationship to reproducibility of a scientific research. We describe the dataset acquisition and creation including geometry, luminaires, surface reflectances, reference photographs etc. needed to use this model in testing of algorithms. The created dataset can be used in computer graphics and beyond, in particular in global illumination algorithms with focus on realistic and predictive image synthesis. Outside of computer graphics, it can be used as general source of real world geometry with an existing counterpart and hence also suitable for computer vision. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Hasler, N., Rosenhahn, B., Thormählen, T., Wand, M., Gall, J., and Seidel, H.-P. 2009a. Markerless Motion Capture with Unsynchronized Moving Cameras. 2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009), IEEE Computer Society.
Abstract
In this work we present an approach for markerless motion capture (MoCap) of articulated objects, which are recorded with multiple unsynchronized moving cameras. Instead of using fixed (and expensive) hardware synchronized cameras, this approach allows us to track people with off-the-shelf handheld video cameras. To prepare a sequence for motion capture, we first reconstruct the static background and the position of each camera using Structure-from-Motion (SfM). Then the cameras are registered to each other using the reconstructed static background geometry. Camera synchronization is achieved via the audio streams recorded by the cameras in parallel. Finally, a markerless MoCap approach is applied to recover positions and joint configurations of subjects. Feature tracks and dense background geometry are further used to stabilize the MoCap. The experiments show examples with highly challenging indoor and outdoor scenes.
Export
BibTeX
@inproceedings{Hasler-et-al_CVPR09,
  TITLE     = {Markerless Motion Capture with Unsynchronized Moving Cameras},
  AUTHOR    = {Hasler, Nils and Rosenhahn, Bodo and Thorm{\"a}hlen, Thorsten and Wand, Michael and Gall, J{\"u}rgen and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-3992-8},
  DOI       = {10.1109/CVPRW.2009.5206859},
  LOCALID   = {Local-ID: C125675300671F7B-5D7FAF825CA490EAC1257583004C7FFC-HasRosThoWanGalSei09Audio},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {In this work we present an approach for markerless motion capture (MoCap) of articulated objects, which are recorded with multiple unsynchronized moving cameras. Instead of using fixed (and expensive) hardware synchronized cameras, this approach allows us to track people with off-the-shelf handheld video cameras. To prepare a sequence for motion capture, we first reconstruct the static background and the position of each camera using Structure-from-Motion (SfM). Then the cameras are registered to each other using the reconstructed static background geometry. Camera synchronization is achieved via the audio streams recorded by the cameras in parallel. Finally, a markerless MoCap approach is applied to recover positions and joint configurations of subjects. Feature tracks and dense background geometry are further used to stabilize the MoCap. The experiments show examples with highly challenging indoor and outdoor scenes.},
  BOOKTITLE = {2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009)},
  PAGES     = {224--231},
  ADDRESS   = {Miami, FL, USA},
}
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Rosenhahn, Bodo %A Thorm&#228;hlen, Thorsten %A Wand, Michael %A Gall, J&#252;rgen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture with Unsynchronized Moving Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19BB-0 %F EDOC: 520486 %R 10.1109/CVPRW.2009.5206859 %F OTHER: Local-ID: C125675300671F7B-5D7FAF825CA490EAC1257583004C7FFC-HasRosThoWanGalSei09Audio %D 2009 %B 2009 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2009-06-20 - 2009-06-25 %C Miami, FL, USA %X In this work we present an approach for markerless motion capture (MoCap) of <br>articulated objects, which are recorded with multiple unsynchronized moving <br>cameras. Instead of using fixed (and expensive) hardware synchronized cameras, <br>this approach allows<br>us to track people with off-the-shelf handheld video ca\-me\-ras. To prepare a <br>sequence for motion capture, we first reconstruct<br>the static background and the position of each camera using <br>Structure-from-Motion (SfM). Then the cameras<br>are registered to each other using the reconstructed static background geometry.<br>Camera synchronization is achieved via the audio streams recorded by the <br>ca\-me\-ras in parallel. Finally, a markerless MoCap approach is applied to <br>recover positions and joint configurations of subjects. Feature tracks and <br>dense background geometry are further used to stabilize the MoCap. The <br>ex\-pe\-ri\-ments show examples with highly challenging indoor and outdoor <br>scenes. 
%B 2009 IEEE Conference on Computer Vision and Pattern Recognition %P 224 - 231 %I IEEE Computer Society %@ 978-1-4244-3992-8
Hasler, N., Stoll, C., Thormählen, T., Rosenhahn, B., and Seidel, H.-P. 2009b. Estimating Body Shape of Dressed Humans. Computers & Graphics33, 3.
Abstract
The paper presents a method to estimate the detailed 3D body shape of a person even if heavy or loose clothing is worn. The approach is based on a space of human shapes, learned from a large database of registered body scans. Together with this database we use as input a 3D scan or model of the person wearing clothes and apply a fitting method, based on ICP (iterated closest point) registration and Laplacian mesh deformation. The statistical model of human body shapes enforces that the model stays within the space of human shapes. The method therefore allows us to compute the most likely shape and pose of the subject, even if it is heavily occluded or body parts are not visible. Several experiments demonstrate the applicability and accuracy of our approach to recover occluded or missing body parts from 3D laser scans.
Export
BibTeX
@article{HasStoThoRosSei09HiddenBody,
  TITLE    = {Estimating Body Shape of Dressed Humans},
  AUTHOR   = {Hasler, Nils and Stoll, Carsten and Thorm{\"a}hlen, Thorsten and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  URL      = {http://www.mpi-inf.mpg.de/~hasler/download/HasStoRosThoSei09HiddenBody.pdf},
  DOI      = {10.1016/j.cag.2009.03.026},
  LOCALID  = {Local-ID: C125675300671F7B-EAD3C4F215D6E3A6C1257583004BE658-HasStoThoRosSei09HiddenBody},
  YEAR     = {2009},
  DATE     = {2009},
  ABSTRACT = {The paper presents a method to estimate the detailed 3D body shape of a person even if heavy or loose clothing is worn. The approach is based on a space of human shapes, learned from a large database of registered body scans. Together with this database we use as input a 3D scan or model of the person wearing clothes and apply a fitting method, based on ICP (iterated closest point) registration and Laplacian mesh deformation. The statistical model of human body shapes enforces that the model stays within the space of human shapes. The method therefore allows us to compute the most likely shape and pose of the subject, even if it is heavily occluded or body parts are not visible. Several experiments demonstrate the applicability and accuracy of our approach to recover occluded or missing body parts from 3D laser scans.},
  JOURNAL  = {Computers \& Graphics},
  VOLUME   = {33},
  NUMBER   = {3},
  PAGES    = {211--216},
}
Endnote
%0 Journal Article %A Hasler, Nils %A Stoll, Carsten %A Thorm&#228;hlen, Thorsten %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Estimating Body Shape of Dressed Humans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19A7-B %F EDOC: 520485 %R 10.1016/j.cag.2009.03.026 %U http://www.mpi-inf.mpg.de/~hasler/download/HasStoRosThoSei09HiddenBody.pdf %F OTHER: Local-ID: C125675300671F7B-EAD3C4F215D6E3A6C1257583004BE658-HasStoThoRosSei09HiddenBody %D 2009 %* Review method: peer-reviewed %X The paper presents a method to estimate the detailed 3D body shape of a person even if heavy or loose clothing is worn. The approach is based on a space of human shapes, learned from a large database of registered body scans. Together with this database we use as input a 3D scan or model of the person wearing clothes and apply a fitting method, based on ICP (iterated closest point) registration and Laplacian mesh deformation. The statistical model of human body shapes enforces that the model stays within the space of human shapes. The method therefore allows us to compute the most likely shape and pose of the subject, even if it is heavily occluded or body parts are not visible. Several experiments demonstrate the applicability and accuracy of our approach to recover occluded or missing body parts from 3D laser scans. %J Computers & Graphics %V 33 %N 3 %& 211 %P 211 - 216
Hasler, N., Stoll, C., Sunkel, M., Rosenhahn, B., and Seidel, H.-P. 2009c. A Statistical Model of Human Pose and Body Shape. Computer Graphics Forum, Blackwell.
Abstract
Generation and animation of realistic humans is an essential part of many projects in today’s media industry. Especially, the games and special effects industry heavily depend on realistic human animation. In this work a unified model that describes both, human pose and body shape is introduced which allows us to accurately model muscle deformations not only as a function of pose but also dependent on the physique of the subject. Coupled with the model’s ability to generate arbitrary human body shapes, it severely simplifies the generation of highly realistic character animations. A learning based approach is trained on approximately 550 full body 3D laser scans taken of 114 subjects. Scan registration is performed using a non-rigid deformation technique. Then, a rotation invariant encoding of the acquired exemplars permits the computation of a statistical model that simultaneously encodes pose and body shape. Finally, morphing or generating meshes according to several constraints simultaneously can be achieved by training semantically meaningful regressors.
Export
BibTeX
@inproceedings{Hasler-et-al_Eurographics09,
  TITLE     = {A Statistical Model of Human Pose and Body Shape},
  AUTHOR    = {Hasler, Nils and Stoll, Carsten and Sunkel, Martin and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  URL       = {http://www.mpi-inf.mpg.de/~hasler/download/HasStoSunRosSei09Human.pdf},
  DOI       = {10.1111/j.1467-8659.2009.01373.x},
  LOCALID   = {Local-ID: C125675300671F7B-7D32114BA6C90CAAC1257554004B8A5E-Hasler2009},
  PUBLISHER = {Blackwell},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {Generation and animation of realistic humans is an essential part of many projects in today{\textquoteright}s media industry. Especially, the games and special effects industry heavily depend on realistic human animation. In this work a unified model that describes both, human pose and body shape is introduced which allows us to accurately model muscle deformations not only as a function of pose but also dependent on the physique of the subject. Coupled with the model{\textquoteright}s ability to generate arbitrary human body shapes, it severely simplifies the generation of highly realistic character animations. A learning based approach is trained on approximately 550 full body 3D laser scans taken of 114 subjects. Scan registration is performed using a non-rigid deformation technique. Then, a rotation invariant encoding of the acquired exemplars permits the computation of a statistical model that simultaneously encodes pose and body shape. Finally, morphing or generating meshes according to several constraints simultaneously can be achieved by training semantically meaningful regressors.},
  BOOKTITLE = {Eurographics 2009},
  PAGES     = {337--346},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {28},
  ISSUE     = {2},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Stoll, Carsten %A Sunkel, Martin %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Statistical Model of Human Pose and Body Shape : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1982-E %F EDOC: 520498 %U http://www.mpi-inf.mpg.de/~hasler/download/HasStoSunRosSei09Human.pdf %F OTHER: Local-ID: C125675300671F7B-7D32114BA6C90CAAC1257554004B8A5E-Hasler2009 %R 10.1111/j.1467-8659.2009.01373.x %D 2009 %B 30th Annual Conference of the European Association for Computer Graphics %Z date of event: 2009-03-30 - 2009-04-03 %C Munich, Germany %X Generation and animation of realistic humans is an essential part of many <br>projects in today&#8217;s media industry.<br>Especially, the games and special effects industry heavily depend on realistic <br>human animation. In this work a<br>unified model that describes both, human pose and body shape is introduced <br>which allows us to accurately model<br>muscle deformations not only as a function of pose but also dependent on the <br>physique of the subject. Coupled with<br>the model&#8217;s ability to generate arbitrary human body shapes, it severely <br>simplifies the generation of highly realistic<br>character animations. A learning based approach is trained on approximately 550 <br>full body 3D laser scans taken<br>of 114 subjects. Scan registration is performed using a non-rigid deformation <br>technique. Then, a rotation invariant<br>encoding of the acquired exemplars permits the computation of a statistical <br>model that simultaneously encodes<br>pose and body shape. 
Finally, morphing or generating meshes according to <br>several constraints simultaneously<br>can be achieved by training semantically meaningful regressors. %B Eurographics 2009 %P 337 - 346 %I Blackwell %J Computer Graphics Forum %V 28 %N 2 %I Blackwell-Wiley %@ false
Gall, J., Stoll, C., de Aguiar, E., Theobalt, C., Rosenhahn, B., and Seidel, H.-P. 2009. Motion Capture Using Joint Skeleton Tracking and Surface Estimation. 2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009), IEEE Computer Society.
Abstract
This paper proposes a method for capturing the performance of a human or an animal from a multi-view video sequence. Given an articulated template model and silhouettes from a multi-view image sequence, our approach recovers not only the movement of the skeleton, but also the possibly non-rigid temporal deformation of the 3D surface. While large scale deformations or fast movements are captured by the skeleton pose and approximate surface skinning, true small scale deformations or non-rigid garment motion are captured by fitting the surface to the silhouette. We further propose a novel optimization scheme for skeleton-based pose estimation that exploits the skeleton's tree structure to split the optimization problem into a local one and a lower dimensional global one. We show on various sequences that our approach can capture the 3D motion of animals and humans accurately even in the case of rapid movements and wide apparel like skirts.
Export
BibTeX
@inproceedings{Gall-et-al_CVPR09,
  TITLE     = {Motion Capture Using Joint Skeleton Tracking and Surface Estimation},
  AUTHOR    = {Gall, J{\"u}rgen and Stoll, Carsten and de Aguiar, Edilson and Theobalt, Christian and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-3992-8},
  DOI       = {10.1109/CVPR.2009.5206755},
  LOCALID   = {Local-ID: C125675300671F7B-76CFE158D6F5470BC1257583005FD867-Gall2009b},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {This paper proposes a method for capturing the performance of a human or an animal from a multi-view video sequence. Given an articulated template model and silhouettes from a multi-view image sequence, our approach recovers not only the movement of the skeleton, but also the possibly non-rigid temporal deformation of the 3D surface. While large scale deformations or fast movements are captured by the skeleton pose and approximate surface skinning, true small scale deformations or non-rigid garment motion are captured by fitting the surface to the silhouette. We further propose a novel optimization scheme for skeleton-based pose estimation that exploits the skeleton's tree structure to split the optimization problem into a local one and a lower dimensional global one. We show on various sequences that our approach can capture the 3D motion of animals and humans accurately even in the case of rapid movements and wide apparel like skirts.},
  BOOKTITLE = {2009 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2009)},
  PAGES     = {1746--1753},
  ADDRESS   = {Miami, FL, USA},
}
Endnote
%0 Conference Proceedings %A Gall, J&#252;rgen %A Stoll, Carsten %A de Aguiar, Edilson %A Theobalt, Christian %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Capture Using Joint Skeleton Tracking and Surface Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19BF-8 %F EDOC: 520480 %F OTHER: Local-ID: C125675300671F7B-76CFE158D6F5470BC1257583005FD867-Gall2009b %R 10.1109/CVPR.2009.5206755 %D 2009 %B 2009 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2009-06-20 - 2009-06-25 %C Miami, FL, USA %X This paper proposes a method for capturing the performance of a human or<br>an animal from a multi-view video sequence. Given an articulated<br>template model and silhouettes from a multi-view image sequence, our<br>approach recovers not only the movement of the skeleton, but also the<br>possibly non-rigid temporal deformation of the 3D surface.<br>While large scale deformations or fast movements are captured by the skeleton<br>pose and approximate surface skinning, true small scale deformations or <br>non-rigid garment motion are captured by fitting the surface to<br>the silhouette. We further<br>propose a novel optimization scheme for skeleton-based pose estimation<br>that exploits the skeleton's tree structure to split the<br>optimization problem into a local one and a lower dimensional global one.<br>We show on various sequences that our approach can capture the 3D motion of<br>animals and humans accurately even in the case of rapid movements and<br>wide apparel like skirts. 
%B 2009 IEEE Conference on Computer Vision and Pattern Recognition %P 1746 - 1753 %I IEEE Computer Society %@ 978-1-4244-3992-8
Fuchs, M., Chen, T., Wang, O., Raskar, R., Lensch, H.P.A., and Seidel, H.-P. 2009a. A shaped temporal filter camera. Max-Planck-Institut für Informatik, Saarbrücken.
Export
BibTeX
@techreport{FuchsChenWangRaskarLenschSeidel2009,
  TITLE       = {A shaped temporal filter camera},
  AUTHOR      = {Fuchs, Martin and Chen, Tongbo and Wang, Oliver and Raskar, Ramesh and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-003},
  NUMBER      = {MPI-I-2009-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2009},
  DATE        = {2009},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Fuchs, Martin %A Chen, Tongbo %A Wang, Oliver %A Raskar, Ramesh %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A shaped temporal filter camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-666E-E %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2009 %P 25 p. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Fuchs, M., Chen, T., Wang, O., Raskar, R., Seidel, H.-P., and Lensch, H.P.A. 2009b. A Shaped Temporal Filter Camera. Proceedings of the Vision, Modeling and Visualization Workshop 2009 (VMV 2009), Otto-Von-Guericke-Universität Magdeburg, Institut für Simulation und Graphik.
Abstract
Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion.
Export
BibTeX
@inproceedings{Fuchs-et-al_VMV09,
  TITLE     = {A Shaped Temporal Filter Camera},
  AUTHOR    = {Fuchs, Martin and Chen, Tongbo and Wang, Oliver and Raskar, Ramesh and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-3-9804874-8-1},
  URL       = {http://www.mpi-inf.mpg.de/resources/stfcamera/stfcamera.web.pdf},
  LOCALID   = {Local-ID: C125675300671F7B-57E4968A533263AAC125767700576840-Fuchs2008},
  PUBLISHER = {Otto-Von-Guericke-Universit{\"a}t Magdeburg, Institut f{\"u}r Simulation und Graphik},
  YEAR      = {2009},
  DATE      = {2009},
  ABSTRACT  = {Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion.},
  BOOKTITLE = {Proceedings of the Vision, Modeling and Visualization Workshop 2009 (VMV 2009)},
  EDITOR    = {Magnor, Marcus and Rosenhahn, Bodo and Theisel, Holger},
  PAGES     = {177--186},
  ADDRESS   = {Braunschweig, Germany},
}
Endnote
%0 Conference Proceedings %A Fuchs, Martin %A Chen, Tongbo %A Wang, Oliver %A Raskar, Ramesh %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Shaped Temporal Filter Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1980-1 %F EDOC: 520467 %U http://www.mpi-inf.mpg.de/resources/stfcamera/stfcamera.web.pdf %F OTHER: Local-ID: C125675300671F7B-57E4968A533263AAC125767700576840-Fuchs2008 %D 2009 %B 14th International Workshop on Vision, Modeling, and Visualization %Z date of event: 2009-11-16 - 2009-11-18 %C Braunschweig, Germany %X Digital movie cameras only perform a discrete sampling of real-world imagery.<br> While spatial sampling effects are well studied in the literature, there has<br> not been as much work in regards to temporal sampling. As cameras get faster<br> and faster, the need for conventional frame-rate video that matches the<br> abilities of human perception remains. In this article, we introduce a system<br> with controlled temporal sampling behavior. It transforms a high fps input<br> stream into a conventional speed output video in<br> real-time.<br> %<br> We investigate the effect of different temporal sampling kernels and<br> demonstrate that extended, overlapping kernels can mitigate aliasing<br> artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be<br> achieved. By applying Fourier transforms in the temporal domain, we can also<br> obtain novel tools for analyzing and visualizing time dependent effects.<br> %<br> We demonstrate the effect of different<br> sampling kernels in creating enhanced movies and stills of fast motion. 
%B Proceedings of the Vision, Modeling and Visualization Workshop 2009 %E Magnor, Marcus; Rosenhahn, Bodo; Theisel, Holger %P 177 - 186 %I Otto-Von-Guericke-Universit&#228;t Magdeburg, Institut f&#252;r Simulation und Graphik %@ 978-3-9804874-8-1
Dong, Z., Grosch, T., Ritschel, T., Kautz, J., and Seidel, H.-P. 2009. Real-time Indirect Illumination with Clustered Visibility. Vision, Modeling, and Visualization Workshop (VMV 2009), DNB.
Abstract
Visibility computation is often the bottleneck when rendering indirect illumination. However, recent methods based on instant radiosity have demonstrated that accurate visibility is not required for indirect illumination. To exploit this insight, we cluster a large number of virtual point lights – which represent the indirect illumination when using instant radiosity – into a small number of virtual area lights. This allows us to compute visibility using recent real-time soft shadow algorithms. Such approximate and fractional from-area visibility is faster to compute and avoids banding when compared to exact binary from-point visibility. Our results show that the perceptual error of this approximation is negligible and that we achieve real-time frame-rates for large and dynamic scenes.
Export
BibTeX
@inproceedings{Dong-et-al_VMV09,
  TITLE = {Real-time Indirect Illumination with Clustered Visibility},
  AUTHOR = {Dong, Zhao and Grosch, Thorsten and Ritschel, Tobias and Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISBN = {978-3-9804874-8-1},
  LOCALID = {Local-ID: C125675300671F7B-03596D01CE337993C12576370067BA17-DongGRKS2009:ClusterVisib},
  PUBLISHER = {DNB},
  YEAR = {2009},
  DATE = {2009},
  ABSTRACT = {Visibility computation is often the bottleneck when rendering indirect illumination. However, recent methods based on instant radiosity have demonstrated that accurate visibility is not required for indirect illumination. To exploit this insight, we cluster a large number of virtual point lights -- which represent the indirect illumination when using instant radiosity -- into a small number of virtual area lights. This allows us to compute visibility using recent real-time soft shadow algorithms. Such approximate and fractional from-area visibility is faster to compute and avoids banding when compared to exact binary from-point visibility. Our results show that the perceptual error of this approximation is negligible and that we achieve real-time frame-rates for large and dynamic scenes.},
  BOOKTITLE = {Vision, Modeling, and Visualization Workshop (VMV 2009)},
  PAGES = {187--196},
  ADDRESS = {Braunschweig, Germany},
}
Endnote
%0 Conference Proceedings %A Dong, Zhao %A Grosch, Thorsten %A Ritschel, Tobias %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Indirect Illumination with Clustered Visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19D1-D %F EDOC: 520471 %F OTHER: Local-ID: C125675300671F7B-03596D01CE337993C12576370067BA17-DongGRKS2009:ClusterVisib %D 2009 %B 14th International Workshop on Vision, Modeling, and Visualization %Z date of event: 2009-11-16 - 2009-11-18 %C Braunschweig, Germany %X Visibility computation is often the bottleneck when rendering indirect <br>illumination. However, recent methods based on instant radiosity have <br>demonstrated that accurate visibility is not required for indirect <br>illumination. To exploit this insight, we cluster a large number of virtual <br>point lights -- which represent the indirect illumination when using instant <br>radiosity -- into a small number of virtual area lights. This allows us to <br>compute visibility using recent real-time soft shadow algorithms. Such <br>approximate and fractional from-area visibility is faster to compute and avoids <br>banding when compared to exact binary from-point visibility. Our results show, <br>that the perceptual error of this approximation is negligible and that we <br>achieve real-time frame-rates for large and dynamic scenes. %B Vision, Modeling, and Visualization Workshop %P 187 - 196 %I DNB %@ 978-3-9804874-8-1
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2009. A Question of Time: Importance and Possibilities of High Refresh-rates. Visual Computing Research Conference, Intel Visual Computing Institute.
Abstract
This work will discuss shortcomings of traditional rendering techniques on today's wide-spread LCD screens. The main observation is that 3D renderings often appear blurred when observed on such a display. Although this might seem to be a shortcoming of the hardware, such blur is actually a consequence of the human visual system perceiving such displays. In this work, we introduce a perception-aware rendering technique that is of very low cost, but significantly improves performance, as well as quality. Especially in conjunction with more recent devices, initially conceived for 3D shutter glasses, our approach achieves significant gains. Besides quality, we show that such approaches even improve task-performance which makes it a crucial component for future interactive applications.
Export
BibTeX
@inproceedings{Didyk2009,
  TITLE = {A Question of Time: Importance and Possibilities of High Refresh-rates},
  AUTHOR = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  LOCALID = {Local-ID: C125675300671F7B-6F99D73D0B04CA52C12576B9005417E0-Didyk2009},
  PUBLISHER = {Intel Visual Computing Institute},
  YEAR = {2009},
  DATE = {2009},
  ABSTRACT = {This work will discuss shortcomings of traditional rendering techniques on today's wide-spread LCD screens. The main observation is that 3D renderings often appear blurred when observed on such a display. Although this might seem to be a shortcoming of the hardware, such blur is actually a consequence of the human visual system perceiving such displays. In this work, we introduce a perception-aware rendering technique that is of very low cost, but significantly improves performance, as well as quality. Especially in conjunction with more recent devices, initially conceived for 3D shutter glasses, our approach achieves significant gains. Besides quality, we show that such approaches even improve task-performance which makes it a crucial component for future interactive applications.},
  BOOKTITLE = {Visual Computing Research Conference},
  PAGES = {1--3},
}
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Question of Time: Importance and Possibilities of High Refresh-rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-197D-B %F EDOC: 520455 %F OTHER: Local-ID: C125675300671F7B-6F99D73D0B04CA52C12576B9005417E0-Didyk2009 %I Intel Visual Computing Institute %D 2009 %B Untitled Event %Z date of event: 2009-12-08 - 2009-12-10 %C 8-10 December 2009 %X This work will discuss shortcomings of traditional rendering techniques on today's wide-spread LCD screens. The main observation is that 3D renderings often appear blurred when observed on such a display. Although this might seem to be a shortcoming of the hardware, such blur is actually a consequence of the human visual system perceiving such displays.\\ In this work, we introduce a perception-aware rendering technique that is of very low cost, but significantly improves performance, as well as quality. Especially in conjunction with more recent devices, initially conceived for 3D shutter glasses, our approach achieves significant gains. Besides quality, we show that such approaches even improve task-performance which makes it a crucial component for future interactive applications. %B Visual Computing Research Conference %P 1 - 3 %I Intel Visual Computing Institute
Cui, Y., Hasler, N., Thormählen, T., and Seidel, H.-P. 2009. Scale Invariant Feature Transform with Irregular Orientation Histogram Binning. Image Analysis and Recognition (ICIAR 2009), Springer.
Abstract
The SIFT (Scale Invariant Feature Transform) descriptor is a widely used method for matching image features. However, perfect scale invariance can not be achieved in practice because of sampling artefacts, noise in the image data, and the fact that the computational effort limits the number of analyzed scale space images. In this paper we propose a modification of the descriptor's regular grid of orientation histogram bins to an irregular grid. The irregular grid approach reduces the negative effect of scale error and significantly increases the matching precision for image features. Results with a standard data set are presented that show that the irregular grid approach outperforms the original SIFT descriptor and other state-of-the-art extensions.
Export
BibTeX
@inproceedings{Cui-et-al_ICIAR09,
  TITLE = {Scale Invariant Feature Transform with Irregular Orientation Histogram Binning},
  AUTHOR = {Cui, Yan and Hasler, Nils and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISBN = {978-3-642-02610-2},
  URL = {http://www.mpi-inf.mpg.de/~hasler/download/CuiHasThoSei09igSIFT.pdf},
  DOI = {10.1007/978-3-642-02611-9_26},
  LOCALID = {Local-ID: C125675300671F7B-68A3510DE982DAE9C1257583004A992D-CuiHasThoSei09},
  PUBLISHER = {Springer},
  YEAR = {2009},
  DATE = {2009},
  ABSTRACT = {The SIFT (Scale Invariant Feature Transform) descriptor is a widely used method for matching image features. However, perfect scale invariance can not be achieved in practice because of sampling artefacts, noise in the image data, and the fact that the computational effort limits the number of analyzed scale space images. In this paper we propose a modification of the descriptor's regular grid of orientation histogram bins to an irregular grid. The irregular grid approach reduces the negative effect of scale error and significantly increases the matching precision for image features. Results with a standard data set are presented that show that the irregular grid approach outperforms the original SIFT descriptor and other state-of-the-art extensions.},
  BOOKTITLE = {Image Analysis and Recognition (ICIAR 2009)},
  PAGES = {258--267},
  SERIES = {Lecture Notes in Computer Science},
  VOLUME = {5627},
  ADDRESS = {Halifax, Canada},
}
Endnote
%0 Conference Proceedings %A Cui, Yan %A Hasler, Nils %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Scale Invariant Feature Transform with Irregular Orientation Histogram Binning : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19DA-C %F EDOC: 520484 %R 10.1007/978-3-642-02611-9_26 %U http://www.mpi-inf.mpg.de/~hasler/download/CuiHasThoSei09igSIFT.pdf %F OTHER: Local-ID: C125675300671F7B-68A3510DE982DAE9C1257583004A992D-CuiHasThoSei09 %D 2009 %B 6th International Conference on Image Analysis and Recognition %Z date of event: 2009-07-06 - 2009-07-08 %C Halifax, Canada %X The SIFT (Scale Invariant Feature Transform) descriptor is a widely used method <br>for matching image features. However, perfect scale invariance can not be <br>achieved in practice because of sampling artefacts, noise in the image data, <br>and the fact that the computational effort limits the number of analyzed scale <br>space images. In this paper we propose a modification of the descriptor's <br>regular grid of orientation histogram bins to an irregular grid. The irregular <br>grid approach reduces the negative effect of scale error and significantly <br>increases the matching precision for image features. Results with a standard <br>data set are presented that show that the irregular grid approach outperforms <br>the original SIFT descriptor and other state-of-the-art extentions. %B Image Analysis and Recognition %P 258 - 267 %I Springer %@ 978-3-642-02610-2 %B Lecture Notes in Computer Science %N 5627 %U https://rdcu.be/dJkon
Bokeloh, M., Berner, A., Wand, M., Seidel, H.-P., and Schilling, A. 2009. Symmetry Detection Using Feature Lines. Computer Graphics Forum, Blackwell-Wiley.
Export
BibTeX
@inproceedings{Bokeloh-et-al_Eurographics09,
  TITLE = {Symmetry Detection Using Feature Lines},
  AUTHOR = {Bokeloh, Martin and Berner, Alexander and Wand, Michael and Seidel, Hans-Peter and Schilling, Andreas},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/j.1467-8659.2009.01410.x},
  LOCALID = {Local-ID: C125675300671F7B-D1896C22D82F8E5EC125754A0050D63E-Bokeloh2008b},
  PUBLISHER = {Blackwell-Wiley},
  YEAR = {2009},
  DATE = {2009},
  BOOKTITLE = {Eurographics 2009},
  PAGES = {697--706},
  JOURNAL = {Computer Graphics Forum},
  VOLUME = {28},
  ISSUE = {2},
  ADDRESS = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Bokeloh, Martin %A Berner, Alexander %A Wand, Michael %A Seidel, Hans-Peter %A Schilling, Andreas %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Symmetry Detection Using Feature Lines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19E4-3 %F EDOC: 520497 %F OTHER: Local-ID: C125675300671F7B-D1896C22D82F8E5EC125754A0050D63E-Bokeloh2008b %R 10.1111/j.1467-8659.2009.01410.x %D 2009 %Z Review method: peer-reviewed %B 30th Annual Conference of the European Association for Computer Graphics %Z date of event: 2009-03-30 - 2009-04-03 %C Munich, Germany %B Eurographics 2009 %P 697 - 706 %I Blackwell-Wiley %J Computer Graphics Forum %V 28 %N 2 %I Blackwell-Wiley %@ false
Berner, A., Bokeloh, M., Wand, M., Schilling, A., and Seidel, H.-P. 2009. Generalized intrinsic symmetry detection. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this paper, we address the problem of detecting partial symmetries in 3D objects. In contrast to previous work, our algorithm is able to match deformed symmetric parts: We first develop an algorithm for the case of approximately isometric deformations, based on matching graphs of surface feature lines that are annotated with intrinsic geometric properties. The sensitivity to non-isometry is controlled by tolerance parameters for each such annotation. Using large tolerance values for some of these annotations and a robust matching of the graph topology yields a more general symmetry detection algorithm that can detect similarities in structures that have undergone strong deformations. This approach for the first time allows for detecting partial intrinsic as well as more general, non-isometric symmetries. We evaluate the recognition performance of our technique for a number synthetic and real-world scanner data sets.
Export
BibTeX
@techreport{BernerBokelohWandSchillingSeidel2009,
  TITLE = {Generalized intrinsic symmetry detection},
  AUTHOR = {Berner, Alexander and Bokeloh, Martin and Wand, Michael and Schilling, Andreas and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-005},
  NUMBER = {MPI-I-2009-4-005},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2009},
  DATE = {2009},
  ABSTRACT = {In this paper, we address the problem of detecting partial symmetries in 3D objects. In contrast to previous work, our algorithm is able to match deformed symmetric parts: We first develop an algorithm for the case of approximately isometric deformations, based on matching graphs of surface feature lines that are annotated with intrinsic geometric properties. The sensitivity to non-isometry is controlled by tolerance parameters for each such annotation. Using large tolerance values for some of these annotations and a robust matching of the graph topology yields a more general symmetry detection algorithm that can detect similarities in structures that have undergone strong deformations. This approach for the first time allows for detecting partial intrinsic as well as more general, non-isometric symmetries. We evaluate the recognition performance of our technique for a number synthetic and real-world scanner data sets.},
  TYPE = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Berner, Alexander %A Bokeloh, Martin %A Wand, Martin %A Schilling, Andreas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Generalized intrinsic symmetry detection : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-666B-3 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2009-4-005 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2009 %P 33 p. %X In this paper, we address the problem of detecting partial symmetries in 3D objects. In contrast to previous work, our algorithm is able to match deformed symmetric parts: We first develop an algorithm for the case of approximately isometric deformations, based on matching graphs of surface feature lines that are annotated with intrinsic geometric properties. The sensitivity to non-isometry is controlled by tolerance parameters for each such annotation. Using large tolerance values for some of these annotations and a robust matching of the graph topology yields a more general symmetry detection algorithm that can detect similarities in structures that have undergone strong deformations. This approach for the first time allows for detecting partial intrinsic as well as more general, non-isometric symmetries. We evaluate the recognition performance of our technique for a number synthetic and real-world scanner data sets. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Baak, A., Müller, M., Rosenhahn, B., and Seidel, H.-P. 2009. Stabilizing Motion Tracking Using Retrieved Motion Priors. 2009 IEEE 12th International Conference on Computer Vision (ICCV 2009), IEEE.
Export
BibTeX
@inproceedings{Baak-et-al_ICCV09,
  TITLE = {Stabilizing Motion Tracking Using Retrieved Motion Priors},
  AUTHOR = {Baak, Andreas and M{\"u}ller, Meinard and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISBN = {978-1-4244-4419-9},
  DOI = {10.1109/ICCV.2009.5459291},
  LOCALID = {Local-ID: C125675300671F7B-F899EC378EBF92EBC125767B00420BA2-BaakRMS09_StabilizedTracking_ICCV},
  PUBLISHER = {IEEE},
  YEAR = {2009},
  DATE = {2009},
  BOOKTITLE = {2009 IEEE 12th International Conference on Computer Vision (ICCV 2009)},
  EDITOR = {Cipolla, Roberto and Hebert, Martial and Tang, Xiaoou and Yokoya, Naokazu},
  PAGES = {1428--1435},
  ADDRESS = {Kyoto, Japan},
}
Endnote
%0 Conference Proceedings %A Baak, Andreas %A M&#252;ller, Meinard %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stabilizing Motion Tracking Using Retrieved Motion Priors : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19E2-7 %F EDOC: 520504 %F OTHER: Local-ID: C125675300671F7B-F899EC378EBF92EBC125767B00420BA2-BaakRMS09_StabilizedTracking_ICCV %R 10.1109/ICCV.2009.5459291 %D 2009 %B 2009 IEEE 12th International Conference on Computer Vision %Z date of event: 2009-09-29 - 2009-10-02 %C Kyoto, Japan %B 2009 IEEE 12th International Conference on Computer Vision %E Cipolla, Roberto; Hebert, Martial; Tang, Xiaoou; Yokoya, Naokazu %P 1428 - 1435 %I IEEE %@ 978-1-4244-4419-9
Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2009. Predicting Display Visibility Under Dynamically Changing Lighting Conditions. Computer Graphics Forum, Blackwell-Wiley.
Abstract
Display devices, more than ever, are finding their ways into electronic consumer goods as a result of recent trends in providing more functionality and user interaction. Combined with the new developments in display technology towards higher reproducible luminance range, the mobility and variation in capability of display devices are constantly increasing. Consequently, in real life usage it is now very likely that the display emission will be distorted by spatially and temporally varying reflections, and that the observer's visual system will not be adapted to the particular display that she is viewing at that moment. The actual perception of the display content cannot be fully understood by only considering steady-state illumination and adaptation conditions. We propose an objective method for display visibility analysis formulating the problem as a full-reference image quality assessment problem, where the display emission under "ideal" conditions is used as the reference for real-life conditions. Our work includes a human visual system model that accounts for maladaptation and temporal recovery of sensitivity. As an example application we integrate our method to a global illumination simulator and analyze the visibility of a car interior display under realistic lighting conditions.
Export
BibTeX
@inproceedings{Aydin-et-al_Eurographics09,
  TITLE = {Predicting Display Visibility Under Dynamically Changing Lighting Conditions},
  AUTHOR = {Aydin, Tunc O. and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/j.1467-8659.2009.01356.x},
  LOCALID = {Local-ID: C125675300671F7B-33AE0A5CE1E47467C125755C00347B6E-Tunc2009EG},
  PUBLISHER = {Blackwell-Wiley},
  YEAR = {2009},
  DATE = {2009},
  ABSTRACT = {Display devices, more than ever, are finding their ways into electronic consumer goods as a result of recent trends in providing more functionality and user interaction. Combined with the new developments in display technology towards higher reproducible luminance range, the mobility and variation in capability of display devices are constantly increasing. Consequently, in real life usage it is now very likely that the display emission will be distorted by spatially and temporally varying reflections, and that the observer's visual system will not be adapted to the particular display that she is viewing at that moment. The actual perception of the display content cannot be fully understood by only considering steady-state illumination and adaptation conditions. We propose an objective method for display visibility analysis formulating the problem as a full-reference image quality assessment problem, where the display emission under ``ideal'' conditions is used as the reference for real-life conditions. Our work includes a human visual system model that accounts for maladaptation and temporal recovery of sensitivity. As an example application we integrate our method to a global illumination simulator and analyze the visibility of a car interior display under realistic lighting conditions.},
  BOOKTITLE = {Eurographics 2009},
  PAGES = {173--182},
  JOURNAL = {Computer Graphics Forum},
  VOLUME = {28},
  ISSUE = {2},
  ADDRESS = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Aydin, Tunc O. %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Predicting Display Visibility Under Dynamically Changing Lighting Conditions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19CB-C %F EDOC: 520442 %F OTHER: Local-ID: C125675300671F7B-33AE0A5CE1E47467C125755C00347B6E-Tunc2009EG %R 10.1111/j.1467-8659.2009.01356.x %D 2009 %Z Review method: peer-reviewed %B 30th Annual Conference of the European Association for Computer Graphics %Z date of event: 2009-03-30 - 2009-04-03 %C Munich, Germany %X Display devices, more than ever, are finding their ways into electronic <br>consumer goods as a result of recent trends in providing more functionality and <br>user interaction. Combined with the new developments in display technology <br>towards higher reproducible luminance range, the<br>mobility and variation in capability of display devices are constantly <br>increasing. Consequently, in real life usage it is now very likely that the <br>display emission to be distorted by spatially and temporally varying <br>reflections, and the observer's visual system to be not adapted to the <br>particular display that she is viewing at that moment. The actual perception of <br>the display content cannot be fully understood by only considering steady-state <br>illumination and adaptation conditions. We propose an objective method for <br>display visibility analysis formulating the problem as a full-reference image <br>quality assessment problem, where the display emission under ``ideal'' <br>conditions is used as the reference for real-life conditions. Our work includes <br>a human visual system model that accounts for maladaptation and temporal <br>recovery of sensitivity. 
As an example application we integrate our method to a <br>global illumination simulator and analyze the visibility of a car interior <br>display under realistic lighting conditions. %B Eurographics 2009 %P 173 - 182 %I Blackwell-Wiley %J Computer Graphics Forum %V 28 %N 2 %I Blackwell-Wiley %@ false
2008
Zimmer, H.L., Bruhn, A., Valgaerts, L., et al. 2008. PDE-based Anisotropic Disparity-driven Stereo Vision. Vision, Modeling, and Visualization 2008 (VMV 2008), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{ZimmerVMV2008,
  TITLE = {{PDE}-based Anisotropic Disparity-driven Stereo Vision},
  AUTHOR = {Zimmer, Henning Lars and Bruhn, Andreas and Valgaerts, Levi and Breu{\ss}, Michael and Weickert, Joachim and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISBN = {978-3-89838-609-8},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR = {2008},
  DATE = {2008},
  BOOKTITLE = {Vision, Modeling, and Visualization 2008 (VMV 2008)},
  EDITOR = {Deussen, Oliver and Keim, Daniel and Saupe, Dietmar},
  PAGES = {263--272},
  ADDRESS = {Konstanz, Germany},
}
Endnote
%0 Conference Proceedings %A Zimmer, Henning Lars %A Bruhn, Andreas %A Valgaerts, Levi %A Breu&#223;, Michael %A Weickert, Joachim %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T PDE-based Anisotropic Disparity-driven Stereo Vision : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-CDF9-A %D 2008 %B 13th International Fall Workshop Vision, Modeling, and Visualization 2008 %Z date of event: 2008-10-08 - 2008-10-10 %C Konstanz, Germany %B Vision, Modeling, and Visualization 2008 %E Deussen, Oliver; Keim, Daniel; Saupe, Dietmar %P 263 - 272 %I Akademische Verlagsgesellschaft Aka %@ 978-3-89838-609-8
Ziegler, G., Theobalt, C., Seidel, H.-P., and Dyken, C. 2008. High-speed Marching Cubes using HistoPyramids. Computer Graphics Forum27, 8.
Abstract
We present an implementation approach for Marching Cubes (MC) on graphics hardware for OpenGL 2.0 or comparable graphics APIs. It currently outperforms all other known graphics processing units (GPU)-based iso-surface extraction algorithms in direct rendering for sparse or large volumes, even those using the recently introduced geometry shader (GS) capabilities. To achieve this, we outfit the Histogram Pyramid (HP) algorithm, previously only used in GPU data compaction, with the capability for arbitrary data expansion. After reformulation of MC as a data compaction and expansion process, the HP algorithm becomes the core of a highly efficient and interactive MC implementation. For graphics hardware lacking GSs, such as mobile GPUs, the concept of HP data expansion is easily generalized, opening new application domains in mobile visual computing. Further, to serve recent developments, we present how the HP can be implemented in the parallel programming language CUDA (compute unified device architecture), by using a novel 1D chunk/layer construction.
Export
BibTeX
@article{Ziegler-et-al_CGF08,
  TITLE = {High-speed Marching Cubes using {HistoPyramids}},
  AUTHOR = {Ziegler, Gernot and Theobalt, Christian and Seidel, Hans-Peter and Dyken, Christopher},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/j.1467-8659.2008.01182.x},
  LOCALID = {Local-ID: C125756E0038A185-B3A724743E5FF8D9C125755E00822229-Dyken2008},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS = {Oxford},
  YEAR = {2008},
  DATE = {2008},
  ABSTRACT = {We present an implementation approach for Marching Cubes (MC) on graphics hardware for OpenGL 2.0 or comparable graphics APIs. It currently outperforms all other known graphics processing units (GPU)-based iso-surface extraction algorithms in direct rendering for sparse or large volumes, even those using the recently introduced geometry shader (GS) capabilities. To achieve this, we outfit the Histogram Pyramid (HP) algorithm, previously only used in GPU data compaction, with the capability for arbitrary data expansion. After reformulation of MC as a data compaction and expansion process, the HP algorithm becomes the core of a highly efficient and interactive MC implementation. For graphics hardware lacking GSs, such as mobile GPUs, the concept of HP data expansion is easily generalized, opening new application domains in mobile visual computing. Further, to serve recent developments, we present how the HP can be implemented in the parallel programming language CUDA (compute unified device architecture), by using a novel 1D chunk/layer construction.},
  JOURNAL = {Computer Graphics Forum},
  VOLUME = {27},
  NUMBER = {8},
  PAGES = {2028--2039},
}
Endnote
%0 Journal Article %A Ziegler, Gernot %A Theobalt, Christian %A Seidel, Hans-Peter %A Dyken, Christopher %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T High-speed Marching Cubes using HistoPyramids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BDD-5 %F EDOC: 428260 %R 10.1111/j.1467-8659.2008.01182.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01182.x %F OTHER: Local-ID: C125756E0038A185-B3A724743E5FF8D9C125755E00822229-Dyken2008 %D 2008 %* Review method: peer-reviewed %X We present an implementation approach for Marching Cubes (MC) on graphics <br>hardware for OpenGL 2.0 or comparable graphics APIs. It currently outperforms <br>all other known graphics processing units (GPU)-based iso-surface extraction <br>algorithms in direct rendering for sparse or large volumes, even those using <br>the recently introduced geometry shader (GS) capabilites. To achieve this, we <br>outfit the Histogram Pyramid (HP) algorithm, previously only used in GPU data <br>compaction, with the capability for arbitrary data expansion. After <br>reformulation of MC as a data compaction and expansion process, the HP <br>algorithm becomes the core of a highly efficient and interactive MC <br>implementation. For graphics hardware lacking GSs, such as mobile GPUs, the <br>concept of HP data expansion is easily generalized, opening new application <br>domains in mobile visual computing. Further, to serve recent developments, we <br>present how the HP can be implemented in the parallel programming language CUDA <br>(compute unified device architecture), by using a novel 1D chunk/layer <br>construction. %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 27 %N 8 %& 2028 %P 2028 - 2039 %I Blackwell-Wiley %C Oxford %@ false
Yoshizawa, S., Belyaev, A., Yokota, H., and Seidel, H.-P. 2008. Fast, Robust, and Faithful Methods for Detecting Crest Lines on Meshes. Computer Aided Geometric Design25, 8.
Abstract
The crest lines, salient subsets of the extrema of the principal curvatures over their corresponding curvature lines, are powerful shape descriptors which are widely used for shape matching, interrogation, and visualization purposes. In this paper, we develop fast, accurate, and reliable methods for detecting the crest lines on surfaces approximated by dense triangle meshes. The methods exploit intrinsic geometric properties of the curvature extrema and provide an inherent level-of-detail control of the detected crest lines. As an immediate application, we use the crest lines for adaptive mesh simplification purposes.
Export
BibTeX
@article{Yoshizawa-et-al_CAGD08,
  title     = {Fast, Robust, and Faithful Methods for Detecting Crest Lines on Meshes},
  author    = {Yoshizawa, Shin and Belyaev, Alexander and Yokota, Hideo and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-8396},
  doi       = {10.1016/j.cagd.2008.06.008},
  localid   = {Local-ID: C125756E0038A185-29736E776E60E92EC12574DB000335A3-Yoshizawa2008},
  publisher = {North-Holland},
  address   = {Amsterdam},
  year      = {2008},
  date      = {2008},
  abstract  = {The crest lines, salient subsets of the extrema of the principal curvatures over their corresponding curvature lines, are powerful shape descriptors which are widely used for shape matching, interrogation, and visualization purposes. In this paper, we develop fast, accurate, and reliable methods for detecting the crest lines on surfaces approximated by dense triangle meshes. The methods exploit intrinsic geometric properties of the curvature extrema and provide with an inherent level-of-detail control of the detected crest lines. As an immediate application, we use of the crest lines for adaptive mesh simplification purposes.},
  journal   = {Computer Aided Geometric Design},
  volume    = {25},
  number    = {8},
  pages     = {545--560},
}
Endnote
%0 Journal Article %A Yoshizawa, Shin %A Belyaev, Alexander %A Yokota, Hideo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast, Robust, and Faithful Methods for Detecting Crest Lines on Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BAD-2 %F EDOC: 428259 %R 10.1016/j.cagd.2008.06.008 %F OTHER: Local-ID: C125756E0038A185-29736E776E60E92EC12574DB000335A3-Yoshizawa2008 %D 2008 %* Review method: peer-reviewed %X The crest lines, salient subsets of the extrema of the principal curvatures <br>over their corresponding curvature lines, are powerful shape descriptors which <br>are widely used for shape matching, interrogation, and visualization purposes. <br>In this paper, we develop fast, accurate, and reliable methods for detecting <br>the crest lines on surfaces approximated by dense triangle meshes. The methods <br>exploit intrinsic geometric properties of the curvature extrema and provide <br>with an inherent level-of-detail control of the detected crest lines. As an <br>immediate application, we use of the crest lines for adaptive mesh <br>simplification purposes. %J Computer Aided Geometric Design %V 25 %N 8 %& 545 %P 545 - 560 %I North-Holland %C Amsterdam %@ false
Yoshida, A., Ihrke, M., Mantiuk, R., and Seidel, H.-P. 2008a. Brightness of the Glare Illusion. APGV ’08: Proceedings of the 5th Symposium on Applied Perception in Graphics and Visualization, ACM.
Export
BibTeX
@inproceedings{Yoshida-et-al_APGV08,
  title     = {Brightness of the Glare Illusion},
  author    = {Yoshida, Akiko and Ihrke, Matthias and Mantiuk, Rafa{\l} and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-59593-981-4},
  url       = {http://www.mpi-inf.mpg.de/~yoshida/Yoshida_APGV2008.pdf},
  doi       = {10.1145/1394281.1394297},
  localid   = {Local-ID: C125756E0038A185-0747F286D3E9D7EDC12574410035A60A-Yoshida2008_APGV},
  publisher = {ACM},
  year      = {2008},
  date      = {2008},
  booktitle = {APGV '08: Proceedings of the 5th Symposium on Applied Perception in Graphics and Visualization},
  editor    = {Creem-Regehr, Sarah and Myszkowski, Karol},
  pages     = {83--90},
  address   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Ihrke, Matthias %A Mantiuk, Rafa&#322; %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Brightness of the Glare Illusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B26-E %F EDOC: 428257 %U http://www.mpi-inf.mpg.de/~yoshida/Yoshida_APGV2008.pdf %F OTHER: Local-ID: C125756E0038A185-0747F286D3E9D7EDC12574410035A60A-Yoshida2008_APGV %R 10.1145/1394281.1394297 %D 2008 %B 5th Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2008-08-09 - 2008-08-10 %C Los Angeles, CA, USA %B APGV '08: Proceedings of the 5th Symposium on Applied Perception in Graphics and Visualization %E Creem-Regehr, Sarah; Myszkowski, Karol %P 83 - 90 %I ACM %@ 978-1-59593-981-4
Yoshida, A., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2008b. Perception-based Contrast Enhancement Model for Complex Images in High Dynamic Range. Human Vision and Electronic Imaging XIII, SPIE.
Export
BibTeX
@inproceedings{Yoshida-et-al_HMEI08,
  title     = {Perception-based Contrast Enhancement Model for Complex Images in High Dynamic Range},
  author    = {Yoshida, Akiko and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-0-8194-6978-6},
  doi       = {10.1117/12.766500},
  localid   = {Local-ID: C125756E0038A185-1AF67FD9509EB0FAC12573AF006318E7-Yoshida_SPIE2008},
  publisher = {SPIE},
  year      = {2008},
  date      = {2008},
  booktitle = {Human Vision and Electronic Imaging XIII},
  editor    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.},
  pages     = {68060C-1--68060C-11},
  series    = {Proceedings of SPIE-IS\&T Electronic Imaging},
  volume    = {6806},
  address   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-based Contrast Enhancement Model for Complex Images in High Dynamic Range : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CA2-3 %F EDOC: 428258 %R 10.1117/12.766500 %U http://dx.doi.org/10.1117/12.766500 %F OTHER: Local-ID: C125756E0038A185-1AF67FD9509EB0FAC12573AF006318E7-Yoshida_SPIE2008 %D 2008 %B Human Vision and Electronic Imaging XIII %Z date of event: 2008-01-28 - 2008-01-31 %C San Jose, CA, USA %B Human Vision and Electronic Imaging XIII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 68060C-1 - 68060C-11 %I SPIE %@ 978-0-8194-6978-6 %B Proceedings of SPIE-IS&T Electronic Imaging %N 6806
Wang, D., Belyaev, A., Saleem, W., and Seidel, H.-P. 2008. Shape Complexity from Image Similarity. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present an approach to automatically compute the complexity of a given 3D shape. Previous approaches have made use of geometric and/or topological properties of the 3D shape to compute complexity. Our approach is based on shape appearance and estimates the complexity of a given 3D shape according to how 2D views of the shape diverge from each other. We use similarity among views of the 3D shape as the basis for our complexity computation. Hence our approach uses claims from psychology that humans mentally represent 3D shapes as organizations of 2D views and, therefore, mimics how humans gauge shape complexity. Experimental results show that our approach produces results that are more in agreement with the human notion of shape complexity than those obtained using previous approaches.
Export
BibTeX
@techreport{WangBelyaevSaleemSeidel2008,
  title       = {Shape Complexity from Image Similarity},
  author      = {Wang, Danyi and Belyaev, Alexander and Saleem, Waqar and Seidel, Hans-Peter},
  language    = {eng},
  url         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2008-4-002},
  number      = {MPI-I-2008-4-002},
  institution = {Max-Planck-Institut f{\"u}r Informatik},
  address     = {Saarbr{\"u}cken},
  year        = {2008},
  date        = {2008},
  abstract    = {We present an approach to automatically compute the complexity of a given 3D shape. Previous approaches have made use of geometric and/or topological properties of the 3D shape to compute complexity. Our approach is based on shape appearance and estimates the complexity of a given 3D shape according to how 2D views of the shape diverge from each other. We use similarity among views of the 3D shape as the basis for our complexity computation. Hence our approach uses claims from psychology that humans mentally represent 3D shapes as organizations of 2D views and, therefore, mimics how humans gauge shape complexity. Experimental results show that our approach produces results that are more in agreement with the human notion of shape complexity than those obtained using previous approaches.},
  type        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Wang, Danyi %A Belyaev, Alexander %A Saleem, Waqar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shape Complexity from Image Similarity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66B9-6 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2008-4-002 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2008 %P 28 p. %X We present an approach to automatically compute the complexity of a given 3D shape. Previous approaches have made use of geometric and/or topological properties of the 3D shape to compute complexity. Our approach is based on shape appearance and estimates the complexity of a given 3D shape according to how 2D views of the shape diverge from each other. We use similarity among views of the 3D shape as the basis for our complexity computation. Hence our approach uses claims from psychology that humans mentally represent 3D shapes as organizations of 2D views and, therefore, mimics how humans gauge shape complexity. Experimental results show that our approach produces results that are more in agreement with the human notion of shape complexity than those obtained using previous approaches. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Wand, M., Berner, A., Bokeloh, M., et al. 2008. Processing and Interactive Editing of Huge Point Clouds from 3D Scanners. Computers and Graphics 32, 2.
Export
BibTeX
@article{Wand-et-al_CG08,
  title     = {Processing and Interactive Editing of Huge Point Clouds from {3D} Scanners},
  author    = {Wand, Michael and Berner, Alexander and Bokeloh, Martin and Jenke, Philipp and Fleck, Arno and Hoffmann, Mark and Maier, Benjamin and Staneker, Dirk and Schilling, Andreas and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0097-8493},
  doi       = {10.1016/j.cag.2008.01.010},
  localid   = {Local-ID: C125756E0038A185-57763414823072F0C125754A004772EF-Wand2008a},
  publisher = {Pergamon},
  address   = {New York},
  year      = {2008},
  date      = {2008},
  journal   = {Computers and Graphics},
  volume    = {32},
  number    = {2},
  pages     = {204--220},
}
Endnote
%0 Journal Article %A Wand, Michael %A Berner, Alexander %A Bokeloh, Martin %A Jenke, Philipp %A Fleck, Arno %A Hoffmann, Mark %A Maier, Benjamin %A Staneker, Dirk %A Schilling, Andreas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Processing and Interactive Editing of Huge Point Clouds from 3D Scanners : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CB9-2 %F EDOC: 428253 %R 10.1016/j.cag.2008.01.010 %U http://dx.doi.org/10.1016/j.cag.2008.01.010 %F OTHER: Local-ID: C125756E0038A185-57763414823072F0C125754A004772EF-Wand2008a %D 2008 %* Review method: peer-reviewed %J Computers and Graphics %V 32 %N 2 %& 204 %P 204 - 220 %I Pergamon %C New York %@ false
Von Funck, W., Theisel, H., and Seidel, H.-P. 2008a. Volume-preserving Mesh Skinning. Vision, Modeling, and Visualization 2008 (VMV 2008), Akademische Verlagsgesellschaft Aka.
Abstract
Light transport in complex scenes with possibly intricate optical properties is difficult to grasp intuitively. The study of light transport has so far mainly been conducted by indirect observations. Cameras or human observers typically only sense the radiance reflected from a scene, i.e. the light directly emitted or reflected from the last bounce of a possibly much longer light path. Models for the propagation of light, however, typically assume light waves or rays, concepts which so far have been communicated in an abstract way using formulas or sketches. In this paper, we propose the use of fluorescent fluids for direct visualization of light transport in the real world. In the fluorescent fluid the traces of light become visible as a small fraction of the energy transported along the ray is scattered out towards the viewer. We demonstrate this visualization for direct illumination effects such as reflections and refractions at various surfaces, as well as for global effects such as subsurface light transport in translucent material, caustics, or interreflections. As this allows for the inspection of entire light paths, rather than the last scattering event, we believe that this novel visualization can help to intuitively explain the phenomena of light transport to students and experts alike.
Export
BibTeX
@inproceedings{vonFunckVMV2008,
  title         = {Volume-preserving Mesh Skinning},
  author        = {von Funck, Wolfram and Theisel, Holger and Seidel, Hans-Peter},
  language      = {eng},
  isbn          = {978-3-89838-609-8},
  publisher     = {Akademische Verlagsgesellschaft Aka},
  year          = {2008},
  date          = {2008},
  booktitle     = {Vision, Modeling, and Visualization 2008 (VMV 2008)},
  editor        = {Deussen, Oliver and Keim, Daniel and Saupe, Dietmar},
  pages         = {409--414},
  address       = {Konstanz, Germany},
  internal-note = {Abstract removed during review: the previous ABSTRACT text described a fluorescent-fluid light-transport visualization paper and did not belong to this mesh-skinning publication. Restore the correct abstract from the VMV 2008 proceedings if needed.},
}
Endnote
%0 Conference Proceedings %A von Funck, Wolfram %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Volume-preserving Mesh Skinning : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-CDF2-7 %D 2008 %B 13th International Fall Workshop Vision, Modeling, and Visualization 2008 %Z date of event: 2008-10-08 - 2008-10-10 %C Konstanz, Germany %X Light transport in complex scenes with possibly intricate optical properties is difficult to grasp intuitively. The study of light transport has so far mainly been conducted by indirect observations. Cameras or human observers typically only sense the radiance reflected from a scene, i.e. the light directly emitted or reflected from the last bounce of a possibly much longer light path. Models for the propagation of light, however, typically assume light waves or rays, concepts which so far have been communicated in an abstract way using formulas or sketches. In this paper, we propose the use of fluorescent fluids for direct visualization of light transport in the real world. In the fluorescent fluid the traces of light become visible as a small fraction of the energy transported along the ray is scattered out towards the viewer. We demonstrate this visualization for direct illumination effects such as reflections and refractions at various surfaces, as well as for global effects such as subsurface light transport in translucent material, caustics, or interreflections. As this allows for the inspection of entire light paths, rather than the last scattering event, we believe that this novel visualization can help to intuitively explain the phenomena of light transport to students and experts alike. %B Vision, Modeling, and Visualization 2008 %E Deussen, Oliver; Keim, Daniel; Saupe, Dietmar %P 409 - 414 %I Akademische Verlagsgesellschaft Aka %@ 978-3-89838-609-8
Von Funck, W., Weinkauf, T., Theisel, H., and Seidel, H.-P. 2008b. Smoke Surfaces: An Interactive Flow Visualization Technique Inspired by Real-world Flow Experiments. IEEE Transactions on Visualization and Computer Graphics 14, 6.
Export
BibTeX
@article{Weinkauf-et-al_TVCG08,
  title     = {Smoke Surfaces: An Interactive Flow Visualization Technique Inspired by Real-world Flow Experiments},
  author    = {von Funck, Wolfram and Weinkauf, Tino and Theisel, Holger and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2008.163},
  localid   = {Local-ID: C125756E0038A185-7124A7C3DB940128C12575590045C7A4-FunckTransVCG2008},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2008},
  date      = {2008},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {14},
  number    = {6},
  pages     = {1396--1403},
}
Endnote
%0 Journal Article %A von Funck, Wolfram %A Weinkauf, Tino %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Smoke Surfaces: An Interactive Flow Visualization Technique Inspired by Real-world Flow Experiments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CF7-5 %F EDOC: 428251 %F OTHER: Local-ID: C125756E0038A185-7124A7C3DB940128C12575590045C7A4-FunckTransVCG2008 %R 10.1109/TVCG.2008.163 %D 2008 %* Review method: peer-reviewed %J IEEE Transactions on Visualization and Computer Graphics %V 14 %N 6 %& 1396 %P 1396 - 1403 %I IEEE Computer Society %C New York, NY %@ false
Thormählen, T., Hasler, N., Wand, M., and Seidel, H.-P. 2008. Merging of Feature Tracks for Camera Motion Estimation from Video. 5th European Conference on Visual Media Production (CVMP 2008), IET.
Export
BibTeX
@inproceedings{Thormaehlen2008,
  title         = {Merging of Feature Tracks for Camera Motion Estimation from Video},
  author        = {Thorm{\"a}hlen, Thorsten and Hasler, Nils and Wand, Michael and Seidel, Hans-Peter},
  language      = {eng},
  localid       = {Local-ID: C125756E0038A185-5D87FC91EAE1A3E8C125754A00501F2B-Thormaehlen2008},
  publisher     = {IET},
  year          = {2008},
  date          = {2008},
  booktitle     = {5th European Conference on Visual Media Production (CVMP 2008)},
  pages         = {1--8},
  internal-note = {YEAR normalized from 2009 to 2008 to match DATE and the CVMP 2008 proceedings title; verify against the publisher record before relying on it.},
}
Endnote
%0 Conference Proceedings %A Thorm&#228;hlen, Thorsten %A Hasler, Nils %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Merging of Feature Tracks for Camera Motion Estimation from Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C35-8 %F EDOC: 428240 %F OTHER: Local-ID: C125756E0038A185-5D87FC91EAE1A3E8C125754A00501F2B-Thormaehlen2008 %I IET %D 2008 %B Untitled Event %Z date of event: 2009-01-26 - 2009-01-27 %C London, UK %B 5th European Conference on Visual Media Production (CVMP 2008) %P 1 - 8 %I IET
Thormählen, T. and Seidel, H.-P. 2008. 3D-modeling by Ortho-image Generation from Image Sequences. ACM Transactions on Graphics (Proc. SIGGRAPH 2008), ACM.
Export
BibTeX
@inproceedings{Thormahlen-Seidel_SIGGRAPH08,
  title     = {{3D}-modeling by Ortho-image Generation from Image Sequences},
  author    = {Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1360612.1360685},
  localid   = {Local-ID: C125756E0038A185-B889AC35E8A01F86C125755C003F879C-Thorm{\"a}hlen2008Ortho},
  publisher = {ACM},
  year      = {2008},
  date      = {2008},
  booktitle = {Proceedings of ACM SIGGRAPH 2008},
  editor    = {Turk, Greg},
  pages     = {86.1--86.5},
  journal   = {ACM Transactions on Graphics (Proc. SIGGRAPH)},
  volume    = {27},
  issue     = {3},
  address   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D-modeling by Ortho-image Generation from Image Sequences : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AB6-3 %F EDOC: 428241 %R 10.1145/1360612.1360685 %F OTHER: Local-ID: C125756E0038A185-B889AC35E8A01F86C125755C003F879C-Thorm&#228;hlen2008Ortho %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, CA, USA %B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P 86.1 - 86.5 %I ACM %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
Theobalt, C., Magnor, M., and Seidel, H.-P. 2008a. Video-based Capturing and Rendering of People. In: Human Motion. Springer, Dordrecht.
Abstract
In this chapter, we present a model-free pose estimation algorithm to estimate the relative pose of a rigid object. In the context of human motion, a rigid object can be either a limb, the head, or the back. In most pose estimation algorithms, the object of interest covers a large image area. We focus on pose estimation of objects covering a field of view of less than 5$^\circ$\ by 5$^\circ$\ using stereo vision. With this new algorithm suitable for small objects, we investigate the effect of the object size on the pose accuracy. In addition, we introduce an object tracking technique that is insensitive to partial occlusion. We are particularly interested in human motion in this context focusing on crash test dummies. The main application for this method is the analysis of crash video sequences. For a human motion capture system, a connection of the various limbs can be done in an additional step. The ultimate goal is to fully obtain the motion of crash test dummies in a vehicle crash. This would give information on which body part is exposed to what kind of forces and rotational forces could be determined as well. Knowing all this, car manufacturers can optimize the passive safety components to reduce forces on the dummy and ultimately on the real vehicle passengers. Since camera images for crash videos contain the whole crash vehicle, the size of the crash test dummies is relatively small in our experiments. For these experiments, mostly high-speed cameras with high resolution are used. However, the method described here easily extends to real-time robotics applications with smaller VGA-size images, where relative pose estimation is needed, {e.g.}\ for manipulator control.
Export
BibTeX
@incollection{TheMagSei08,
  title         = {Video-based Capturing and Rendering of People},
  author        = {Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  language      = {eng},
  isbn          = {978-1-4020-6692-4; 978-1-4020-6693-1},
  doi           = {10.1007/978-1-4020-6693-1_22},
  publisher     = {Springer},
  address       = {Dordrecht},
  year          = {2008},
  date          = {2008},
  booktitle     = {Human Motion},
  editor        = {Rosenhahn, Bodo and Klette, Reinhard and Metaxas, Dimitris},
  pages         = {531--559},
  series        = {Computational Imaging and Vision},
  volume        = {36},
  internal-note = {Abstract removed during review: the previous ABSTRACT text described a rigid-object pose-estimation method for crash-test dummies and did not belong to this chapter. Restore the correct abstract from the publisher if needed.},
}
Endnote
%0 Book Section %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Video-based Capturing and Rendering of People : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-CE39-1 %R 10.1007/978-1-4020-6693-1_22 %D 2008 %X In this chapter, we present a model-free pose estimation algorithm to estimate the relative pose of a rigid object. In the context of human motion, a rigid object can be either a limb, the head, or the back. In most pose estimation algorithms, the object of interest covers a large image area. We focus on pose estimation of objects covering a field of view of less than 5$^\circ$\ by 5$^\circ$\ using stereo vision. With this new algorithm suitable for small objects, we investigate the effect of the object size on the pose accuracy. In addition, we introduce an object tracking technique that is insensitive to partial occlusion. We are particularly interested in human motion in this context focusing on crash test dummies. The main application for this method is the analysis of crash video sequences. For a human motion capture system, a connection of the various limbs can be done in an additional step. The ultimate goal is to fully obtain the motion of crash test dummies in a vehicle crash. This would give information on which body part is exposed to what kind of forces and rotational forces could be determined as well. Knowing all this, car manufacturers can optimize the passive safety components to reduce forces on the dummy and ultimately on the real vehicle passengers. Since camera images for crash videos contain the whole crash vehicle, the size of the crash test dummies is relatively small in our experiments. For these experiments, mostly high-speed cameras with high resolution are used. 
However, the method described here easily extends to real-time robotics applications with smaller VGA-size images, where relative pose estimation is needed, {e.g.}\ for manipulator control. %B Human Motion %E Rosenhahn, Bodo; Klette, Reinhard; Metaxas, Dimitris %P 531 - 559 %I Springer %C Dordrecht %@ 978-1-4020-6692-4 978-1-4020-6693-1 %S Computational Imaging and Vision %N 36
Theobalt, C., de Aguiar, E., Magnor, M., and Seidel, H.-P. 2008b. Reconstructing Human Shape, Motion and Appearance from Multi-view Video. In: Three-Dimensional Television: Capture, Transmission, and Display. Springer, Berlin.
Abstract
XX
Export
BibTeX
@incollection{deAguiar2007_book,
  title         = {Reconstructing Human Shape, Motion and Appearance from Multi-view Video},
  author        = {Theobalt, Christian and de Aguiar, Edilson and Magnor, Marcus and Seidel, Hans-Peter},
  language      = {eng},
  isbn          = {978-3-540-72531-2},
  localid       = {Local-ID: C125756E0038A185-5DAA629C768B6A3DC12573B6000D1AFF-deAguiar2007_book},
  publisher     = {Springer},
  address       = {Berlin},
  year          = {2008},
  date          = {2008},
  booktitle     = {Three-Dimensional Television: Capture, Transmission, and Display},
  editor        = {Ozaktas, Haldun and Onural, Levent},
  pages         = {29--58},
  series        = {Signals and communication technology},
  internal-note = {Placeholder ABSTRACT = {XX} removed during review; add the real abstract from the publisher if needed.},
}
Endnote
%0 Book Section %A Theobalt, Christian %A de Aguiar, Edilson %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Reconstructing Human Shape, Motion and Appearance from Multi-view Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CC7-2 %F EDOC: 428237 %F OTHER: Local-ID: C125756E0038A185-5DAA629C768B6A3DC12573B6000D1AFF-deAguiar2007_book %D 2008 %X XX %B Three-Dimensional Television: Capture, Transmission, and Display %E Ozaktas, Haldun; Onural, Levent %P 29 - 58 %I Springer %C Berlin %@ 978-3-540-72531-2 %S Signals and communication technology
Tevs, A., Ihrke, I., and Seidel, H.-P. 2008. Maximum Mipmaps for Fast, Accurate, and Scalable Dynamic Height Field Rendering. I3D ’08: Proceedings of the 2008 Symposium on Interactive 3D Graphics and Games, ACM.
Abstract
This paper presents a GPU-based, fast, and accurate dynamic height field rendering technique that scales well to large scale height fields. Current real-time rendering algorithms for dynamic height fields employ approximate ray-height field intersection methods, whereas accurate algorithms require pre-computation in the order of seconds to minutes and are thus not suitable for dynamic height field rendering. We alleviate this problem by using maximum mipmaps, a hierarchical data structure supporting accurate and efficient rendering while simultaneously lowering the pre-computation costs to negligible levels. Furthermore, maximum mipmaps allow for view-dependent level-of-detail rendering. In combination with hierarchical ray-stepping this results in an efficient intersection algorithm for large scale height fields.
Export
BibTeX
@inproceedings{Tevs-et-al_I3D08,
  TITLE     = {Maximum Mipmaps for Fast, Accurate, and Scalable Dynamic Height Field Rendering},
  AUTHOR    = {Tevs, Art and Ihrke, Ivo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-983-8},
  DOI       = {10.1145/1342250.1342279},
  LOCALID   = {Local-ID: C125756E0038A185-811F5DD0D6BAA56EC125740B00438D95-Tevs2007z},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {This paper presents a GPU-based, fast, and accurate dynamic height field rendering technique that scales well to large scale height fields. Current real-time rendering algorithms for dynamic height fields employ approximate ray-height field intersection methods, whereas accurate algorithms require pre-computation in the order of seconds to minutes and are thus not suitable for dynamic height field rendering. We alleviate this problem by using maximum mipmaps, a hierarchical data structure supporting accurate and efficient rendering while simultaneously lowering the pre-computation costs to negligible levels. Furthermore, maximum mipmaps allow for view-dependent level-of-detail rendering. In combination with hierarchical ray-stepping this results in an efficient intersection algorithm for large scale height fields.},
  BOOKTITLE = {I3D '08: Proceedings of the 2008 Symposium on Interactive 3D Graphics and Games},
  EDITOR    = {Spencer, Stephen},
  PAGES     = {183--190},
  ADDRESS   = {Redwood City, CA, USA},
}
Endnote
%0 Conference Proceedings %A Tevs, Art %A Ihrke, Ivo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Maximum Mipmaps for Fast, Accurate, and Scalable Dynamic Height Field Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C2E-9 %F EDOC: 428234 %F OTHER: Local-ID: C125756E0038A185-811F5DD0D6BAA56EC125740B00438D95-Tevs2007z %R 10.1145/1342250.1342279 %D 2008 %B 2008 Symposium on Interactive 3D Graphics and Games %Z date of event: 2008-02-15 - 2008-02-17 %C Redwood City, CA, USA %X This paper presents a GPU-based, fast, and accurate dynamic height field <br>rendering technique that scales well to large scale height fields. Current <br>real-time rendering algorithms for dynamic height fields employ approximate <br>ray-height field intersection methods, whereas accurate algorithms require <br>pre-computation in the order of seconds to minutes and are thus not suitable <br>for dynamic height field rendering. We alleviate this problem by using maximum <br>mipmaps, a hierarchical data structure supporting accurate and efficient <br>rendering while simultaneously lowering the pre-computation costs to negligible <br>levels. Furthermore, maximum mipmaps allow for view-dependent level-of-detail <br>rendering. In combination with hierarchical ray-stepping this results in an <br>efficient intersection algorithm for large scale height fields. %B I3D '08: Proceedings of the 2008 Symposium on Interactive 3D Graphics and Games %E Spencer, Stephen %P 183 - 190 %I ACM %@ 978-1-59593-983-8
Shi, K., Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2008a. Visualizing Transport Structures of Time-dependent Flow Fields. IEEE Computer Graphics and Applications28, 5.
Export
BibTeX
@article{DBLP:journals/cga/ShiTWHS08,
  TITLE     = {Visualizing Transport Structures of Time-dependent Flow Fields},
  AUTHOR    = {Shi, Kuangyu and Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0272-1716},
  DOI       = {10.1109/MCG.2008.106},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {Los Alamitos, CA},
  YEAR      = {2008},
  DATE      = {2008},
  JOURNAL   = {IEEE Computer Graphics and Applications},
  VOLUME    = {28},
  NUMBER    = {5},
  PAGES     = {24--36},
}
Endnote
%0 Journal Article %A Shi, Kuangyu %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Visualizing Transport Structures of Time-dependent Flow Fields : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5800-E %R 10.1109/MCG.2008.106 %D 2008 %J IEEE Computer Graphics and Applications %V 28 %N 5 %& 24 %P 24 - 36 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Shi, K., Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2008b. Finite-time Transport Structures of Flow Fields. Proceedings of the 2008 IEEE Pacific Visualization Symposium, IEEE.
Export
BibTeX
@inproceedings{Shi-et-al_PacificVis08,
  TITLE     = {Finite-time Transport Structures of Flow Fields},
  AUTHOR    = {Shi, Kuangyu and Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-1966-1},
  DOI       = {10.1109/PACIFICVIS.2008.4475460},
  LOCALID   = {Local-ID: C125756E0038A185-B1BFDB8633CC6C35C125755A00335FCC-ShiPacificVIS2008},
  PUBLISHER = {IEEE},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of the 2008 IEEE Pacific Visualization Symposium},
  EDITOR    = {Fujishiro, Issei and Li, Hua and Ma, Kwan-Liu},
  PAGES     = {63--70},
  ADDRESS   = {Kyoto, Japan},
}
Endnote
%0 Conference Proceedings %A Shi, Kuangyu %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Finite-time Transport Structures of Flow Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BB3-1 %F EDOC: 428224 %F OTHER: Local-ID: C125756E0038A185-B1BFDB8633CC6C35C125755A00335FCC-ShiPacificVIS2008 %R 10.1109/PACIFICVIS.2008.4475460 %D 2008 %B 2008 IEEE Pacific Visualization Symposium %Z date of event: 2008-03-05 - 2008-03-07 %C Kyoto, Japan %B Proceedings of the 2008 IEEE Pacific Visualization Symposium %E Fujishiro, Issei; Li, Hua; Ma, Kwan-Liu %P 63 - 70 %I IEEE %@ 978-1-4244-1966-1
Seidel, H.-P. 2008. Excellence Cluster “Multimodal Computing and Interaction” – Robust, Efficient and Intelligent Processing of Text, Speech, Visual Data, and High Dimensional Representations. Information technology50, 4.
Export
BibTeX
@article{Seidel_IT08,
  TITLE     = {Excellence Cluster {\textquotedblleft}Multimodal Computing and Interaction{\textquotedblright} -- Robust, Efficient and Intelligent Processing of Text, Speech, Visual Data, and High Dimensional Representations},
  AUTHOR    = {Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1611-2776},
  DOI       = {10.1524/itit.2008.0492},
  LOCALID   = {Local-ID: C125756E0038A185-09C05F19237C7B1DC12574B200342D6D-Seidel2008-ECMCaI},
  PUBLISHER = {Oldenbourg Wissenschaftsverlag},
  ADDRESS   = {M{\"u}nchen},
  YEAR      = {2008},
  DATE      = {2008},
  JOURNAL   = {Information technology},
  VOLUME    = {50},
  NUMBER    = {4},
  PAGES     = {253--257},
}
Endnote
%0 Journal Article %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Excellence Cluster &#8220;Multimodal Computing and Interaction&#8221; &#8211; Robust, Efficient and Intelligent Processing of Text, Speech, Visual Data, and High Dimensional Representations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B95-5 %F EDOC: 428221 %R 10.1524/itit.2008.0492 %U http://dx.doi.org/10.1524/itit.2008.0492 %F OTHER: Local-ID: C125756E0038A185-09C05F19237C7B1DC12574B200342D6D-Seidel2008-ECMCaI %D 2008 %* Review method: peer-reviewed %J Information technology %O it %V 50 %N 4 %& 253 %P 253 - 257 %I Oldenbourg Wissenschaftsverlag %C M&#252;nchen %@ false
Schultz, T. and Seidel, H.-P. 2008a. Estimating Crossing Fibers: A Tensor Decomposition Approach. IEEE Transactions on Visualization and Computer Graphics14, 6.
Abstract
Diffusion weighted magnetic resonance imaging is a unique tool for non-invasive investigation of major nerve fiber tracts. Since the popular diffusion tensor (DT-MRI) model is limited to voxels with a single fiber direction, a number of high angular resolution techniques have been proposed to provide information about more diverse fiber distributions. Two such approaches are Q-Ball imaging and spherical deconvolution, which produce orientation distribution functions (ODFs) on the sphere. For analysis and visualization, the maxima of these functions have been used as principal directions, even though the results are known to be biased in case of crossing fiber tracts. In this paper, we present a more reliable technique for extracting discrete orientations from continuous ODFs, which is based on decomposing their higher-order tensor representation into an isotropic component, several rank-1 terms, and a small residual. Comparing to ground truth in synthetic data shows that the novel method reduces bias and reliably reconstructs crossing fibers which are not resolved as individual maxima in the ODF. We present results on both Q-Ball and spherical deconvolution data and demonstrate that the estimated directions allow for plausible fiber tracking in a real data set.
Export
BibTeX
@article{Schultz-Seidel_TVCG08,
  TITLE     = {Estimating Crossing Fibers: A Tensor Decomposition Approach},
  AUTHOR    = {Schultz, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2008.128},
  LOCALID   = {Local-ID: C125756E0038A185-440434B60A189F13C125753F005507E6-Schultz2008Vis},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {New York, NY},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Diffusion weighted magnetic resonance imaging is a unique tool for non-invasive investigation of major nerve fiber tracts. Since the popular diffusion tensor (DT-MRI) model is limited to voxels with a single fiber direction, a number of high angular resolution techniques have been proposed to provide information about more diverse fiber distributions. Two such approaches are Q-Ball imaging and spherical deconvolution, which produce orientation distribution functions (ODFs) on the sphere. For analysis and visualization, the maxima of these functions have been used as principal directions, even though the results are known to be biased in case of crossing fiber tracts. In this paper, we present a more reliable technique for extracting discrete orientations from continuous ODFs, which is based on decomposing their higher-order tensor representation into an isotropic component, several rank-1 terms, and a small residual. Comparing to ground truth in synthetic data shows that the novel method reduces bias and reliably reconstructs crossing fibers which are not resolved as individual maxima in the ODF. We present results on both Q-Ball and spherical deconvolution data and demonstrate that the estimated directions allow for plausible fiber tracking in a real data set.},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME    = {14},
  NUMBER    = {6},
  PAGES     = {1635--1642},
}
Endnote
%0 Journal Article %A Schultz, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Estimating Crossing Fibers: A Tensor Decomposition Approach : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B89-0 %F EDOC: 428218 %R 10.1109/TVCG.2008.128 %U http://dx.doi.org/10.1109/TVCG.2008.128 %F OTHER: Local-ID: C125756E0038A185-440434B60A189F13C125753F005507E6-Schultz2008Vis %D 2008 %* Review method: peer-reviewed %X Diffusion weighted magnetic resonance imaging is a unique<br> tool for non-invasive investigation of major nerve fiber tracts.<br> Since the popular diffusion tensor (DT-MRI) model is limited to<br> voxels with a single fiber direction, a number of high angular<br> resolution techniques have been proposed to provide information<br> about more diverse fiber distributions. Two such approaches are<br> Q-Ball imaging and spherical deconvolution, which produce<br> orientation distribution functions (ODFs) on the sphere. For<br> analysis and visualization, the maxima of these functions have been<br> used as principal directions, even though the results are known to<br> be biased in case of crossing fiber tracts. In this paper, we<br> present a more reliable technique for extracting discrete<br> orientations from continuous ODFs, which is based on decomposing<br> their higher-order tensor representation into an isotropic<br> component, several rank-1 terms, and a small residual. Comparing to<br> ground truth in synthetic data shows that the novel method reduces<br> bias and reliably reconstructs crossing fibers which are not<br> resolved as individual maxima in the ODF. We present results on both<br> Q-Ball and spherical deconvolution data and demonstrate that the<br> estimated directions allow for plausible fiber tracking in a real<br> data set. 
%J IEEE Transactions on Visualization and Computer Graphics %V 14 %N 6 %& 1635 %P 1635 - 1642 %I IEEE Computer Society %C New York, NY %@ false
Schultz, T. and Seidel, H.-P. 2008b. Using Eigenvalue Derivatives for Edge Detection in DT-MRI Data. Pattern Recognition, Springer.
Abstract
This paper introduces eigenvalue derivatives as a fundamental tool to discern the different types of edges present in matrix-valued images. It reviews basic results from perturbation theory, which allow one to compute such derivatives, and shows how they can be used to obtain novel edge detectors for matrix-valued images. It is demonstrated that previous methods for edge detection in matrix-valued images are simplified by considering them in terms of eigenvalue derivatives. Moreover, eigenvalue derivatives are used to analyze and refine the recently proposed Log-Euclidean edge detector. Application examples focus on data from diffusion tensor magnetic resonance imaging (DT-MRI).
Export
BibTeX
@inproceedings{Schultz-Seidel_DAGM08,
  TITLE     = {Using Eigenvalue Derivatives for Edge Detection in {DT}-{MRI} Data},
  AUTHOR    = {Schultz, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-69320-8},
  DOI       = {10.1007/978-3-540-69321-5_20},
  LOCALID   = {Local-ID: C125756E0038A185-DAAFB03E8B94B2F7C125753F00545E1E-Schultz2008DAGM},
  PUBLISHER = {Springer},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {This paper introduces eigenvalue derivatives as a fundamental tool to discern the different types of edges present in matrix-valued images. It reviews basic results from perturbation theory, which allow one to compute such derivatives, and shows how they can be used to obtain novel edge detectors for matrix-valued images. It is demonstrated that previous methods for edge detection in matrix-valued images are simplified by considering them in terms of eigenvalue derivatives. Moreover, eigenvalue derivatives are used to analyze and refine the recently proposed Log-Euclidean edge detector. Application examples focus on data from diffusion tensor magnetic resonance imaging (DT-MRI).},
  BOOKTITLE = {Pattern Recognition},
  EDITOR    = {Rigoll, Gerhard},
  PAGES     = {193--202},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {5096},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Schultz, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Eigenvalue Derivatives for Edge Detection in DT-MRI Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D51-F %F EDOC: 428219 %R 10.1007/978-3-540-69321-5_20 %U http://dx.doi.org/10.1007/978-3-540-69321-5_20 %F OTHER: Local-ID: C125756E0038A185-DAAFB03E8B94B2F7C125753F00545E1E-Schultz2008DAGM %D 2008 %B 30th DAGM Symposium on Pattern Recognition %Z date of event: 2008-06-10 - 2008-06-13 %C Munich, Germany %X This paper introduces eigenvalue derivatives as a fundamental tool<br> to discern the different types of edges present in matrix-valued<br> images. It reviews basic results from perturbation theory, which<br> allow one to compute such derivatives, and shows how they can be<br> used to obtain novel edge detectors for matrix-valued images. It is<br> demonstrated that previous methods for edge detection in<br> matrix-valued images are simplified by considering them in terms of<br> eigenvalue derivatives. Moreover, eigenvalue derivatives are used to<br> analyze and refine the recently proposed Log-Euclidean edge<br> detector. Application examples focus on data from diffusion tensor<br> magnetic resonance imaging (DT-MRI). %B Pattern Recognition %E Rigoll, Gerhard %P 193 - 202 %I Springer %@ 978-3-540-69320-8 %B Lecture Notes in Computer Science %N 5096 %U https://rdcu.be/dI52Y
Schultz, T., Sauber, N., Anwander, A., Theisel, H., and Seidel, H.-P. 2008a. Virtual Klingler Dissection: Putting Fibers into Context. Computer Graphics Forum (Proc. EuroVis 2008), Blackwell.
Abstract
Fiber tracking is a standard tool to estimate the course of major white matter tracts from diffusion tensor magnetic resonance imaging (DT-MRI) data. In this work, we aim at supporting the visual analysis of classical streamlines from fiber tracking by integrating context from anatomical data, acquired by a $T_1$-weighted MRI measurement. To this end, we suggest a novel visualization metaphor, which is based on data-driven deformation of geometry and has been inspired by a technique for anatomical fiber preparation known as Klingler dissection. We demonstrate that our method conveys the relation between streamlines and surrounding anatomical features more effectively than standard techniques like slice images and direct volume rendering. The method works automatically, but its GPU-based implementation allows for additional, intuitive interaction.
Export
BibTeX
@inproceedings{Schultz-et-al_EuroVis08,
  TITLE     = {Virtual Klingler Dissection: Putting Fibers into Context},
  AUTHOR    = {Schultz, Thomas and Sauber, Natascha and Anwander, Alfred and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2008.01243.x},
  LOCALID   = {Local-ID: C125756E0038A185-B0FA30E54855D004C125753F005322C1-Schultz2008EV},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Fiber tracking is a standard tool to estimate the course of major white matter tracts from diffusion tensor magnetic resonance imaging (DT-MRI) data. In this work, we aim at supporting the visual analysis of classical streamlines from fiber tracking by integrating context from anatomical data, acquired by a $T_1$-weighted MRI measurement. To this end, we suggest a novel visualization metaphor, which is based on data-driven deformation of geometry and has been inspired by a technique for anatomical fiber preparation known as Klingler dissection. We demonstrate that our method conveys the relation between streamlines and surrounding anatomical features more effectively than standard techniques like slice images and direct volume rendering. The method works automatically, but its GPU-based implementation allows for additional, intuitive interaction.},
  BOOKTITLE = {EuroVis'08: Proceedings of the 10th Joint Eurographics / IEEE -- VGTC Conference on Visualization},
  EDITOR    = {Vilanova, Anna and Telea, Alex and Scheuermann, Gerik and M{\"o}ller, Torsten},
  PAGES     = {1063--1070},
  JOURNAL   = {Computer Graphics Forum (Proc. EuroVis)},
  VOLUME    = {27},
  ISSUE     = {3},
  ADDRESS   = {Eindhoven, The Netherlands},
}
Endnote
%0 Conference Proceedings %A Schultz, Thomas %A Sauber, Natascha %A Anwander, Alfred %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Virtual Klingler Dissection: Putting Fibers into Context : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D5A-E %F EDOC: 428217 %R 10.1111/j.1467-8659.2008.01243.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01243.x %F OTHER: Local-ID: C125756E0038A185-B0FA30E54855D004C125753F005322C1-Schultz2008EV %D 2008 %B 10th Joint Eurographics / IEEE - VGTC Conference on Visualization %Z date of event: 2008-05-26 - 2008-05-28 %C Eindhoven, The Netherlands %X Fiber tracking is a standard tool to estimate the course of major<br> white matter tracts from diffusion tensor magnetic resonance imaging<br> (DT-MRI) data. In this work, we aim at supporting the visual<br> analysis of classical streamlines from fiber tracking by integrating<br> context from anatomical data, acquired by a $T_1$-weighted MRI<br> measurement. To this end, we suggest a novel visualization metaphor,<br> which is based on data-driven deformation of geometry and has been<br> inspired by a technique for anatomical fiber preparation known as<br> Klingler dissection. We demonstrate that our method conveys the<br> relation between streamlines and surrounding anatomical features<br> more effectively than standard techniques like slice images and<br> direct volume rendering. The method works automatically, but its<br> GPU-based implementation allows for additional, intuitive<br> interaction. 
%B EuroVis'08: Proceedings of the 10th Joint Eurographics / IEEE - VGTC Conference on Visualization %E Vilanova, Anna; Telea, Alex; Scheuermann, Gerik; M&#246;ller, Torsten %P 1063 - 1070 %I Blackwell %J Computer Graphics Forum %V 27 %N 3 %I Blackwell-Wiley %@ false
Schultz, T., Theisel, H., and Seidel, H.-P. 2008b. Crease surfaces: from theory to extraction and application to diffusion tensor MRI. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal), have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not in general orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that DT-MRI streamsurfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute.
Export
BibTeX
@techreport{SchultzTheiselSeidel2008,
  TITLE       = {Crease surfaces: from theory to extraction and application to diffusion tensor {MRI}},
  AUTHOR      = {Schultz, Thomas and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2008-4-003},
  NUMBER      = {MPI-I-2008-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2008},
  DATE        = {2008},
  ABSTRACT    = {Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal), have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not in general orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that DT-MRI streamsurfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Schultz, Thomas %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Crease surfaces: from theory to extraction and application to diffusion tensor MRI : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66B6-C %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2008-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2008 %P 33 p. %X Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal), have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not in general orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that DT-MRI streamsurfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Schall, O., Zayer, R., and Seidel, H.-P. 2008a. Controlled Field Generation for Quad-remeshing. SPM ’08: Proceedings of the 2008 ACM Symposium on Solid and Physical Modeling, ACM.
Abstract
Quadrangular remeshing of triangulated surfaces has received an increasing attention in recent years. A particularly elegant approach is the extraction of quads from the streamlines of a harmonic field. While the construction of such fields is by now a standard technique in geometry processing, enforcing design constraints is still not fully investigated. This work presents a technique for handling directional constraints by directly controlling the gradient of the field. In this way, line constraints sketched by the user or automatically obtained as feature lines can be fulfilled efficiently. Furthermore, we show the potential of quasi-harmonic fields as a flexible tool for controlling the behavior of the field over the surface. Treating the surface as an inhomogeneous domain we can endow specific surface regions with field attraction/repulsion properties.
Export
BibTeX
@inproceedings{Schall-et-al_SPM08,
  TITLE     = {Controlled Field Generation for Quad-remeshing},
  AUTHOR    = {Schall, Oliver and Zayer, Rhaleb and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-60558-106-4},
  DOI       = {10.1145/1364901.1364942},
  LOCALID   = {Local-ID: C125756E0038A185-52DD9E28A7AC04A3C12574660046C332-spm08szs},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Quadrangular remeshing of triangulated surfaces has received an increasing attention in recent years. A particularly elegant approach is the extraction of quads from the streamlines of a harmonic field. While the construction of such fields is by now a standard technique in geometry processing, enforcing design constraints is still not fully investigated. This work presents a technique for handling directional constraints by directly controlling the gradient of the field. In this way, line constraints sketched by the user or automatically obtained as feature lines can be fulfilled efficiently. Furthermore, we show the potential of quasi-harmonic fields as a flexible tool for controlling the behavior of the field over the surface. Treating the surface as an inhomogeneous domain we can endow specific surface regions with field attraction/repulsion properties.},
  BOOKTITLE = {SPM '08: Proceedings of the 2008 ACM Symposium on Solid and Physical Modeling},
  EDITOR    = {Haines, Eric and McGuire, Morgan},
  PAGES     = {295--300},
  ADDRESS   = {Stony Brook, New York, USA},
}
Endnote
%0 Conference Proceedings %A Schall, Oliver %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Controlled Field Generation for Quad-remeshing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B54-6 %F EDOC: 428211 %F OTHER: Local-ID: C125756E0038A185-52DD9E28A7AC04A3C12574660046C332-spm08szs %R 10.1145/1364901.1364942 %D 2008 %B 2008 ACM Symposium on Solid and Physical Modeling %Z date of event: 2008-06-02 - 2008-06-04 %C Stony Brook, New York, USA %X Quadrangular remeshing of triangulated surfaces has received an increasing <br>attention in recent years. A particularly elegant approach is the extraction of <br>quads from the streamlines of a harmonic field. While the construction of such <br>fields is by now a standard technique in geometry processing, enforcing design <br>constraints is still not fully investigated. This work presents a technique for <br>handling directional constraints by directly controlling the gradient of the <br>field. In this way, line constraints sketched by the user or automatically <br>obtained as feature lines can be fulfilled efficiently. Furthermore, we show <br>the potential of quasi-harmonic fields as a flexible tool for controlling the <br>behavior of the field over the surface. Treating the surface as an <br>inhomogeneous domain we can endow specific surface regions with field <br>attraction/repulsion properties. %B SPM '08: Proceedings of the 2008 ACM Symposium on Solid and Physical Modeling %E Haines, Eric; McGuire, Morgan %P 295 - 300 %I ACM %@ 978-1-60558-106-4
Schall, O., Belyaev, A.G., and Seidel, H.-P. 2008b. Adaptive feature-preserving non-local denoising of static and time-varying range data. Computer-Aided Design40, 6.
Export
BibTeX
@article{Schall-et-al_CAD08,
  TITLE    = {Adaptive feature-preserving non-local denoising of static and time-varying range data},
  AUTHOR   = {Schall, Oliver and Belyaev, Alexander G. and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN     = {0010-4485},
  DOI      = {10.1016/j.cad.2008.01.011},
  LOCALID  = {Local-ID: C125756E0038A185-6D2D1CE4B4F38C99C125755A0032829B-SchallCAD2008},
  YEAR     = {2008},
  DATE     = {2008},
  JOURNAL  = {Computer-Aided Design},
  VOLUME   = {40},
  NUMBER   = {6},
  PAGES    = {701--707},
}
Endnote
%0 Journal Article %A Schall, Oliver %A Belyaev, Alexander G. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive feature-preserving non-local denoising of static and time-varying range data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AC4-3 %F EDOC: 428210 %R 10.1016/j.cad.2008.01.011 %U http://dx.doi.org/10.1016/j.cad.2008.01.011 %F OTHER: Local-ID: C125756E0038A185-6D2D1CE4B4F38C99C125755A0032829B-SchallCAD2008 %D 2008 %* Review method: peer-reviewed %J Computer-Aided Design %V 40 %N 6 %& 701 %P 701 - 707 %@ false
Rosenhahn, B., Schmaltz, C., Brox, T., Weickert, J., and Seidel, H.-P. 2008a. Staying Well Grounded in Markerless Motion Capture. Pattern Recognition, Springer.
Export
BibTeX
@inproceedings{Rosenhahn-et-al_DAGM08,
  TITLE     = {Staying Well Grounded in Markerless Motion Capture},
  AUTHOR    = {Rosenhahn, Bodo and Schmaltz, Christian and Brox, Thomas and Weickert, Joachim and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-69320-8},
  DOI       = {10.1007/978-3-540-69321-5_39},
  LOCALID   = {Local-ID: C125756E0038A185-31EC935FCEAB357FC12575590041FEE4-RosenhahnDAGM2008},
  PUBLISHER = {Springer},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Pattern Recognition},
  EDITOR    = {Rigoll, Gerhard},
  PAGES     = {385--395},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {5096},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Schmaltz, Christian %A Brox, Thomas %A Weickert, Joachim %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Staying Well Grounded in Markerless Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D0B-F %F EDOC: 428206 %F OTHER: Local-ID: C125756E0038A185-31EC935FCEAB357FC12575590041FEE4-RosenhahnDAGM2008 %R 10.1007/978-3-540-69321-5_39 %D 2008 %B 30th DAGM Symposium on Pattern Recognition %Z date of event: 2008-06-10 - 2008-06-13 %C Munich, Germany %B Pattern Recognition %E Rigoll, Gerhard %P 385 - 395 %I Springer %@ 978-3-540-69320-8 %B Lecture Notes in Computer Science %N 5096 %U https://rdcu.be/dI51d
Rosenhahn, B., Schmaltz, C., Brox, T., Weickert, J., Cremers, D., and Seidel, H.-P. 2008b. Markerless Motion Capture of Man-machine Interaction. IEEE Conference on Computer Vision and Pattern Recognition 2008 (CVPR 2008), IEEE Computer Society.
Export
BibTeX
@inproceedings{RosenhahnCVPR2008,
  TITLE     = {Markerless Motion Capture of Man-machine Interaction},
  AUTHOR    = {Rosenhahn, Bodo and Schmaltz, Christian and Brox, Thomas and Weickert, Joachim and Cremers, Daniel and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1063-6919},
  ISBN      = {978-1-4244-2243-2; 978-1-4244-2242-5},
  DOI       = {10.1109/CVPR.2008.4587520},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition 2008 (CVPR 2008)},
  PAGES     = {1--8},
  ADDRESS   = {Anchorage, AK, USA},
}
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Schmaltz, Christian %A Brox, Thomas %A Weickert, Joachim %A Cremers, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture of Man-machine Interaction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-CF88-5 %R 10.1109/CVPR.2008.4587520 %D 2008 %B 26th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2008-06-24 - 2008-06-26 %C Anchorage, AK, USA %B IEEE Conference on Computer Vision and Pattern Recognition 2008 %P 1 - 8 %I IEEE Computer Society %@ false
Rosenhahn, B., Kersting, U., Powell, K., Brox, T., and Seidel, H.-P. 2008c. Tracking Clothed People. In: Human Motion. Springer, Dordrecht.
Export
BibTeX
@incollection{RosenhahnHMCh12-2007,
  TITLE     = {Tracking Clothed People},
  AUTHOR    = {Rosenhahn, Bodo and Kersting, Uwe and Powell, Katie and Brox, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4020-6692-4; 978-1-4020-6693-1},
  DOI       = {10.1007/978-1-4020-6693-1_12},
  LOCALID   = {Local-ID: C125756E0038A185-89C4ACD172FD60E4C12573C40048C37E-RosenhahnHMCh12-2007},
  PUBLISHER = {Springer},
  ADDRESS   = {Dordrecht},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Human Motion},
  EDITOR    = {Rosenhahn, Bodo and Klette, Reinhard and Metaxas, Dimitris},
  PAGES     = {295--317},
  SERIES    = {Computational Imaging and Vision},
  VOLUME    = {36},
}
Endnote
%0 Book Section %A Rosenhahn, Bodo %A Kersting, Uwe %A Powell, Katie %A Brox, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Tracking Clothed People : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D45-B %F EDOC: 428204 %F OTHER: Local-ID: C125756E0038A185-89C4ACD172FD60E4C12573C40048C37E-RosenhahnHMCh12-2007 %R 10.1007/978-1-4020-6693-1_12 %D 2008 %B Human Motion %E Rosenhahn, Bodo; Klette, Reinhard; Metaxas, Dimitris %P 295 - 317 %I Springer %C Dordrecht %@ 978-1-4020-6692-4 978-1-4020-6693-1 %S Computational Imaging and Vision %N 36
Rosenhahn, B., Brox, T., Cremers, D., and Seidel, H.-P. 2008d. Modeling and Tracking Line-Constrained Mechanical Systems. Robot Vision (RobVis 2008), Springer.
Export
BibTeX
@inproceedings{Rosenhahn-et-al_RobVis08,
  TITLE     = {Modeling and Tracking Line-Constrained Mechanical Systems},
  AUTHOR    = {Rosenhahn, Bodo and Brox, Thomas and Cremers, Daniel and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-78156-1},
  DOI       = {10.1007/978-3-540-78157-8_8},
  LOCALID   = {Local-ID: C125756E0038A185-57233C39F379505DC1257559004394D4-RosenhahnRobVis2008},
  PUBLISHER = {Springer},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Robot Vision (RobVis 2008)},
  EDITOR    = {Sommer, Gerald and Klette, Reinhard},
  PAGES     = {98--110},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4931},
  ADDRESS   = {Auckland, New Zealand},
}
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Brox, Thomas %A Cremers, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling and Tracking Line-Constrained Mechanical Systems : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C42-A %F EDOC: 428203 %R 10.1007/978-3-540-78157-8_8 %U http://dx.doi.org/10.1007/978-3-540-78157-8_8 %F OTHER: Local-ID: C125756E0038A185-57233C39F379505DC1257559004394D4-RosenhahnRobVis2008 %D 2008 %B Second International Workshop on Robot Vision %Z date of event: 2008-02-18 - 2008-02-20 %C Auckland, New Zealand %B Robot Vision %E Sommer, Gerald; Klette, Reinhard %P 98 - 110 %I Springer %@ 978-3-540-78156-1 %B Lecture Notes in Computer Science %N 4931 %U https://rdcu.be/dITrI
Ritschel, T., Grosch, T., Kautz, J., and Seidel, H.-P. 2008a. Interactive Global Illumination Based on Coherent Surface Shadow Maps. Proceedings of Graphics Interface 2008 (GI 2008), Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Ritschel-et-al_GI08,
  TITLE     = {Interactive Global Illumination Based on Coherent Surface Shadow Maps},
  AUTHOR    = {Ritschel, Tobias and Grosch, Thorsten and Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-423-0},
  URL       = {http://www.mpi-inf.mpg.de/~ritschel/Papers/CSSM.pdf},
  LOCALID   = {Local-ID: C125756E0038A185-721728AB1D34EDC4C12574FF0064B094-Ritschel2007CSSM},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of Graphics Interface 2008 (GI 2008)},
  EDITOR    = {Shaw, Chris and Bartram, Lyn},
  PAGES     = {185--192},
  ADDRESS   = {Windsor, Ontario, Canada},
}
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Grosch, Thorsten %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Global Illumination Based on Coherent Surface Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BFC-0 %F EDOC: 428198 %U http://www.mpi-inf.mpg.de/~ritschel/Papers/CSSM.pdf %F OTHER: Local-ID: C125756E0038A185-721728AB1D34EDC4C12574FF0064B094-Ritschel2007CSSM %D 2008 %B Graphics Interface 2008 %Z date of event: 2008-05-28 - 2008-05-30 %C Windsor, Ontario, Canada %B Proceedings of Graphics Interface 2008 %E Shaw, Chris; Bartram, Lyn %P 185 - 192 %I Canadian Information Processing Society %@ 978-1-56881-423-0
Ritschel, T., Grosch, T., Kim, M.H., Seidel, H.-P., Dachsbacher, C., and Kautz, J. 2008b. Imperfect Shadow Maps for Efficient Computation of Indirect Illumination. ACM Transactions on Graphics, ACM.
Export
BibTeX
@inproceedings{Ritschel-et-al_SIGGRAPHAsia08,
  TITLE     = {Imperfect Shadow Maps for Efficient Computation of Indirect Illumination},
  AUTHOR    = {Ritschel, Tobias and Grosch, Thorsten and Kim, Min H. and Seidel, Hans-Peter and Dachsbacher, Carsten and Kautz, Jan},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1409060.1409082},
  LOCALID   = {Local-ID: C125756E0038A185-EF029E6BD568330CC12574FF006532CF-Ritschel2007ISM},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {ACM SIGGRAPH Asia 2008 papers},
  EDITOR    = {Hart, John C.},
  PAGES     = {129.1--129.8},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {27},
  ISSUE     = {5},
  ADDRESS   = {Singapore},
}
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Grosch, Thorsten %A Kim, Min H. %A Seidel, Hans-Peter %A Dachsbacher, Carsten %A Kautz, Jan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Imperfect Shadow Maps for Efficient Computation of Indirect Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BEA-7 %F EDOC: 428199 %R 10.1145/1409060.1409082 %U http://doi.acm.org/10.1145/1409060.1409082 %F OTHER: Local-ID: C125756E0038A185-EF029E6BD568330CC12574FF006532CF-Ritschel2007ISM %D 2008 %B ACM SIGGRAPH Asia 2008 %Z date of event: 2008-12-11 - 2008-12-13 %C Singapore %B ACM SIGGRAPH Asia 2008 papers %E Hart, John C. %P 129.1 - 129.8 %I ACM %J ACM Transactions on Graphics %V 27 %N 5 %I Association for Computing Machinery %@ false
Ritschel, T., Smith, K., Ihrke, M., Grosch, T., Myszkowski, K., and Seidel, H.-P. 2008c. 3D Unsharp Masking for Scene Coherent Enhancement. ACM Transactions on Graphics (Proc. SIGGRAPH 2008), ACM.
Export
BibTeX
@inproceedings{Ritschel-et-al_SIGGRAPH08,
  TITLE     = {{3D} Unsharp Masking for Scene Coherent Enhancement},
  AUTHOR    = {Ritschel, Tobias and Smith, Kaleigh and Ihrke, Matthias and Grosch, Thorsten and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1360612.1360689},
  LOCALID   = {Local-ID: C125756E0038A185-41E8E32E3589C504C12574C500535A27-Ritschel08Sig},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008},
  EDITOR    = {Turk, Greg},
  PAGES     = {90.1--90.8},
  JOURNAL   = {ACM Transactions on Graphics (Proc. SIGGRAPH)},
  VOLUME    = {27},
  ISSUE     = {3},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Smith, Kaleigh %A Ihrke, Matthias %A Grosch, Thorsten %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Unsharp Masking for Scene Coherent Enhancement : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AB8-0 %F EDOC: 428200 %R 10.1145/1360612.1360689 %U http://doi.acm.org/10.1145/1360612.1360689 %F OTHER: Local-ID: C125756E0038A185-41E8E32E3589C504C12574C500535A27-Ritschel08Sig %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, CA, USA %B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P 90.1 - 90.8 %I ACM %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
Neff, M., Kipp, M., Albrecht, I., and Seidel, H.-P. 2008. Gesture Modeling and Animation Based on a Probabilistic Re-creation of Speaker Style. ACM Transactions on Graphics 27, 1.
Export
BibTeX
@article{Neff-et-al_TG08,
  TITLE     = {Gesture Modeling and Animation Based on a Probabilistic Re-creation of Speaker Style},
  AUTHOR    = {Neff, Michael and Kipp, Michael and Albrecht, Irene and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1330511.1330516},
  LOCALID   = {Local-ID: C125756E0038A185-FBD09C959917E85AC125729700399C00-Neff2007},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2008},
  DATE      = {2008},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {27},
  NUMBER    = {1},
  PAGES     = {5.1--5.24},
}
Endnote
%0 Journal Article %A Neff, Michael %A Kipp, Michael %A Albrecht, Irene %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gesture Modeling and Animation Based on a Probabilistic Re-creation of Speaker Style : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BD1-E %F EDOC: 428176 %F OTHER: Local-ID: C125756E0038A185-FBD09C959917E85AC125729700399C00-Neff2007 %R 10.1145/1330511.1330516 %D 2008 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 27 %N 1 %& 5.1 %P 5.1 - 5.24 %I Association for Computing Machinery %C New York, NY %@ false
Mantiuk, R. and Seidel, H.-P. 2008. Modeling a Generic Tone-mapping Operator. Computer Graphics Forum, Blackwell.
Abstract
Although several new tone-mapping operators are proposed each year, there is no <br>reliable method to validate their performance or to tell how different they are <br>from one another. In order to analyze and understand the behavior of <br>tone-mapping operators, we model their mechanisms by fitting a generic operator <br>to an HDR image and its tone-mapped LDR rendering. We demonstrate that the <br>majority of both global and local tone-mapping operators can be well <br>approximated by computationally inexpensive image processing operations, such <br>as a perpixel tone curve, a modulation transfer function and color saturation <br>adjustment. The results produced by such a generic tone-mapping algorithm are <br>often visually indistinguishable from much more expensive algorithms, such as <br>the bilateral filter. We show the usefulness of our generic tone-mapper in <br>backward-compatible HDR image compression, the black-box analysis of existing <br>tone mapping algorithms and the synthesis of new algorithms that are <br>combination of existing operators.
Export
BibTeX
@inproceedings{Mantiuk-Seidel_EUROGRAPHICS08,
  TITLE     = {Modeling a Generic Tone-mapping Operator},
  AUTHOR    = {Mantiuk, Rafa{\l} and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2008.01168.x},
  LOCALID   = {Local-ID: C125756E0038A185-F1F98FFE6474E383C1257443005A1039-Mantiuk2007mgtmo},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Although several new tone-mapping operators are proposed each year, there is no <br>reliable method to validate their performance or to tell how different they are <br>from one another. In order to analyze and understand the behavior of <br>tone-mapping operators, we model their mechanisms by fitting a generic operator <br>to an HDR image and its tone-mapped LDR rendering. We demonstrate that the <br>majority of both global and local tone-mapping operators can be well <br>approximated by computationally inexpensive image processing operations, such <br>as a perpixel tone curve, a modulation transfer function and color saturation <br>adjustment. The results produced by such a generic tone-mapping algorithm are <br>often visually indistinguishable from much more expensive algorithms, such as <br>the bilateral filter. We show the usefulness of our generic tone-mapper in <br>backward-compatible HDR image compression, the black-box analysis of existing <br>tone mapping algorithms and the synthesis of new algorithms that are <br>combination of existing operators.},
  BOOKTITLE = {EUROGRAPHICS 2008},
  EDITOR    = {Drettakis, George and Scopigno, Roberto},
  PAGES     = {699--708},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {27},
  ISSUE     = {2},
  ADDRESS   = {Crete, Greece},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafa&#322; %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling a Generic Tone-mapping Operator : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C40-E %F EDOC: 428160 %R 10.1111/j.1467-8659.2008.01168.x %U htttp://dx.doi.org/10.1111/j.1467-8659.2008.01168.x %F OTHER: Local-ID: C125756E0038A185-F1F98FFE6474E383C1257443005A1039-Mantiuk2007mgtmo %D 2008 %B 29th Annual Conference of the European Association for Computer Graphics %Z date of event: 2008-04-14 - 2008-04-18 %C Crete, Greece %X Although several new tone-mapping operators are proposed each year, there is no <br>reliable method to validate their performance or to tell how different they are <br>from one another. In order to analyze and understand the behavior of <br>tone-mapping operators, we model their mechanisms by fitting a generic operator <br>to an HDR image and its tone-mapped LDR rendering. We demonstrate that the <br>majority of both global and local tone-mapping operators can be well <br>approximated by computationally inexpensive image processing operations, such <br>as a perpixel tone curve, a modulation transfer function and color saturation <br>adjustment. The results produced by such a generic tone-mapping algorithm are <br>often visually indistinguishable from much more expensive algorithms, such as <br>the bilateral filter. We show the usefulness of our generic tone-mapper in <br>backward-compatible HDR image compression, the black-box analysis of existing <br>tone mapping algorithms and the synthesis of new algorithms that are <br>combination of existing operators. %B EUROGRAPHICS 2008 %E Drettakis, George; Scopigno, Roberto %P 699 - 708 %I Blackwell %J Computer Graphics Forum %V 27 %N 2 %I Blackwell-Wiley %@ false
Lintu, A., Lensch, H.P.A., Magnor, M., Lee, T.-H., El-Abed, S., and Seidel, H.-P. 2008. Multi-wavelength-based Method to de-project Gas and Dust Distributions of several Planetary Nebulae. Proceedings of Asymmetrical Planetary Nebulae IV, ADS.
Export
BibTeX
@inproceedings{Lintu:2007:MWM,
  TITLE     = {Multi-wavelength-based Method to de-project Gas and Dust Distributions of several Planetary Nebulae},
  AUTHOR    = {Lintu, Andrei and Lensch, Hendrik P. A. and Magnor, Marcus and Lee, Ting-Hui and El-Abed, Sascha and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  URL       = {http://www.iac.es/proyecto/apn4/pages/proceedings.php},
  LOCALID   = {Local-ID: C125756E0038A185-C3719DBA4194843EC12573C9003CD546-Lintu:2007:MWM},
  PUBLISHER = {ADS},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of Asymmetrical Planetary Nebulae IV},
  EDITOR    = {Corradi, Romano L. M. and Manchado, Arturo and Soker, Noam},
  PAGES     = {1--6},
}
Endnote
%0 Conference Proceedings %A Lintu, Andrei %A Lensch, Hendrik P. A. %A Magnor, Marcus %A Lee, Ting-Hui %A El-Abed, Sascha %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-wavelength-based Method to de-project Gas and Dust Distributions of several Planetary Nebulae : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C5C-1 %F EDOC: 428148 %U http://www.iac.es/proyecto/apn4/pages/proceedings.php %F OTHER: Local-ID: C125756E0038A185-C3719DBA4194843EC12573C9003CD546-Lintu:2007:MWM %I ADS %D 2008 %B Untitled Event %Z date of event: 2007-06-18 - 2007-06-22 %C La Palma, Spain %B Proceedings of Asymmetrical Planetary Nebulae IV %E Corradi, Romano L.M.; Manchado, Arturo; Soker, Noam %P 1 - 6 %I ADS
Langer, T. and Seidel, H.-P. 2008. Higher Order Barycentric Coordinates. Computer Graphics Forum, Blackwell.
Abstract
In recent years, a wide range of generalized barycentric coordinates has been <br>suggested. <br>However, all of them lack control over derivatives. We show how the notion of <br>barycentric <br>coordinates can be extended to specify derivatives at control points. This is <br>also known <br>as Hermite interpolation. We introduce a method to modify existing barycentric <br>coordinates <br>to higher order barycentric coordinates and demonstrate, using higher order <br>mean value coordinates, <br>that our method, although conceptually simple and easy to implement, can be <br>used to give easy and intuitive <br>control at interactive frame rates over local space deformations such as <br>rotations.
Export
BibTeX
@inproceedings{Langer-Seidel_EUROGRAPHICS08,
  TITLE     = {Higher Order Barycentric Coordinates},
  AUTHOR    = {Langer, Torsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2008.01143.x},
  LOCALID   = {Local-ID: C125756E0038A185-637FCBB7F3F5A70FC12573CC00458C99-LangerEG08},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {In recent years, a wide range of generalized barycentric coordinates has been <br>suggested. <br>However, all of them lack control over derivatives. We show how the notion of <br>barycentric <br>coordinates can be extended to specify derivatives at control points. This is <br>also known <br>as Hermite interpolation. We introduce a method to modify existing barycentric <br>coordinates <br>to higher order barycentric coordinates and demonstrate, using higher order <br>mean value coordinates, <br>that our method, although conceptually simple and easy to implement, can be <br>used to give easy and intuitive <br>control at interactive frame rates over local space deformations such as <br>rotations.},
  BOOKTITLE = {EUROGRAPHICS 2008},
  EDITOR    = {Drettakis, George and Scopigno, Roberto},
  PAGES     = {459--466},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {27},
  ISSUE     = {2},
  ADDRESS   = {Crete, Greece},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Higher Order Barycentric Coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BD9-D %F EDOC: 428145 %R 10.1111/j.1467-8659.2008.01143.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01143.x %F OTHER: Local-ID: C125756E0038A185-637FCBB7F3F5A70FC12573CC00458C99-LangerEG08 %D 2008 %B 29th Annual Conference of the European Association for Computer Graphics %Z date of event: 2008-04-14 - 2008-04-18 %C Crete, Greece %X In recent years, a wide range of generalized barycentric coordinates has been <br>suggested. <br>However, all of them lack control over derivatives. We show how the notion of <br>barycentric <br>coordinates can be extended to specify derivatives at control points. This is <br>also known <br>as Hermite interpolation. We introduce a method to modify existing barycentric <br>coordinates <br>to higher order barycentric coordinates and demonstrate, using higher order <br>mean value coordinates, <br>that our method, although conceptually simple and easy to implement, can be <br>used to give easy and intuitive <br>control at interactive frame rates over local space deformations such as <br>rotations. %B EUROGRAPHICS 2008 %E Drettakis, George; Scopigno, Roberto %P 459 - 466 %I Blackwell %J Computer Graphics Forum %V 27 %N 2 %I Blackwell-Wiley %@ false
Langer, T., Belyaev, A., and Seidel, H.-P. 2008. Mean Value Bézier Maps. Advances in Geometric Modeling and Processing (GMP 2008), Springer.
Abstract
Bernstein polynomials are a classical tool in Computer Aided Design to create <br>smooth maps <br>with a high degree of local control. <br>They are used for the construction of B\'ezier surfaces, free-form <br>deformations, and many other applications. <br>However, classical Bernstein polynomials are only defined for simplices and <br>parallelepipeds. <br>These can in general not directly capture the shape of arbitrary objects. <br>Instead, <br>a tessellation of the desired domain has to be done first.<br><br>We construct smooth maps on arbitrary sets of polytopes <br>such that the restriction to each of the polytopes is a Bernstein polynomial in <br>mean value coordinates <br>(or any other generalized barycentric coordinates). <br>In particular, we show how smooth transitions between different <br>domain polytopes can be ensured.
Export
BibTeX
@inproceedings{Langer-et-al_GMP08,
  TITLE     = {Mean Value B{\'e}zier Maps},
  AUTHOR    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-79245-1},
  ISSN      = {0302-9743},
  DOI       = {10.1007/978-3-540-79246-8_18},
  LOCALID   = {Local-ID: C125756E0038A185-95E5933EF8BCA8D2C12573D4005C6D92-LangerGMP08},
  PUBLISHER = {Springer},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Bernstein polynomials are a classical tool in Computer Aided Design to create <br>smooth maps <br>with a high degree of local control. <br>They are used for the construction of B\'ezier surfaces, free-form <br>deformations, and many other applications. <br>However, classical Bernstein polynomials are only defined for simplices and <br>parallelepipeds. <br>These can in general not directly capture the shape of arbitrary objects. <br>Instead, <br>a tessellation of the desired domain has to be done first.<br><br>We construct smooth maps on arbitrary sets of polytopes <br>such that the restriction to each of the polytopes is a Bernstein polynomial in <br>mean value coordinates <br>(or any other generalized barycentric coordinates). <br>In particular, we show how smooth transitions between different <br>domain polytopes can be ensured.},
  BOOKTITLE = {Advances in Geometric Modeling and Processing (GMP 2008)},
  EDITOR    = {Chen, Falai and J{\"u}ttler, Bert},
  PAGES     = {231--243},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4975},
  ADDRESS   = {Hangzhou, China},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mean Value B&#233;zier Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C30-1 %F EDOC: 428144 %R 10.1007/978-3-540-79246-8_18 %U http://dx.doi.org/10.1007/978-3-540-79246-8_18 %F OTHER: Local-ID: C125756E0038A185-95E5933EF8BCA8D2C12573D4005C6D92-LangerGMP08 %D 2008 %B 5th International Conference on Advances in Geometric Modeling and Processing %Z date of event: 2008-04-23 - 2008-04-25 %C Hangzhou, China %X Bernstein polynomials are a classical tool in Computer Aided Design to create <br>smooth maps <br>with a high degree of local control. <br>They are used for the construction of B\'ezier surfaces, free-form <br>deformations, and many other applications. <br>However, classical Bernstein polynomials are only defined for simplices and <br>parallelepipeds. <br>These can in general not directly capture the shape of arbitrary objects. <br>Instead, <br>a tessellation of the desired domain has to be done first.<br><br>We construct smooth maps on arbitrary sets of polytopes <br>such that the restriction to each of the polytopes is a Bernstein polynomial in <br>mean value coordinates <br>(or any other generalized barycentric coordinates). <br>In particular, we show how smooth transitions between different <br>domain polytopes can be ensured. %B Advances in Geometric Modeling and Processing %E Chen, Falai; J&#252;ttler, Bert %P 231 - 243 %I Springer %@ 978-3-540-79245-1/0302-9743 %B Lecture Notes in Computer Science %N 4975 %U https://rdcu.be/dITRn
Ihrke, I., Stich, T., Gottschlich, H., Magnor, M., and Seidel, H.-P. 2008. Fast Incident Light Field Acquisition and Rendering. Journal of WSCG 16, 1.
Export
BibTeX
@article{Ihrke2008,
  TITLE     = {Fast Incident Light Field Acquisition and Rendering},
  AUTHOR    = {Ihrke, Ivo and Stich, Timo and Gottschlich, Heike and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1213-6964; 1213-6972},
  PUBLISHER = {University of West Bohemia},
  ADDRESS   = {Plzen},
  YEAR      = {2008},
  DATE      = {2008},
  JOURNAL   = {Journal of WSCG},
  VOLUME    = {16},
  NUMBER    = {1},
  PAGES     = {25--32},
}
Endnote
%0 Journal Article %A Ihrke, Ivo %A Stich, Timo %A Gottschlich, Heike %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Incident Light Field Acquisition and Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-A374-0 %7 2008 %D 2008 %J Journal of WSCG %V 16 %N 1 %& 25 %P 25 - 32 %I University of West Bohemia %C Plzen %@ false
Hullin, M.B., Fuchs, M., Ihrke, I., Seidel, H.-P., and Lensch, H.P.A. 2008a. Fluorescent Immersion Range Scanning. ACM Transactions on Graphics (Proc. SIGGRAPH 2008), ACM.
Abstract
The quality of a 3D range scan should not depend on the surface properties of <br>the object. Most active range scanning techniques, however, assume a diffuse <br>reflector to allow for a robust detection of incident light patterns. In our <br>approach we embed the object into a fluorescent liquid. By analyzing the light <br>rays that become visible due to fluorescence rather than analyzing their <br>reflections off the surface, we can detect the intersection points between the <br>projected laser sheet and the object surface for a wide range of different <br>materials. For transparent objects we can even directly depict a slice through <br>the object in just one image by matching its refractive index to the one of the <br>embedding liquid. This enables a direct sampling of the object geometry without <br>the need for computational reconstruction. This way, a high-resolution 3D <br>volume can be assembled simply by sweeping a laser plane through the object. We <br>demonstrate the effectiveness of our light sheet range scanning approach on a <br>set of objects manufactured from a variety of materials and material mixes, <br>including dark, translucent and transparent objects.
Export
BibTeX
@inproceedings{Hullin-et-al_SIGGRAPH08,
  TITLE     = {Fluorescent Immersion Range Scanning},
  AUTHOR    = {Hullin, Matthias B. and Fuchs, Martin and Ihrke, Ivo and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  URL       = {http://doi.acm.org/10.1145/1360612.1360686},
  DOI       = {10.1145/1360612.1360686},
  LOCALID   = {Local-ID: C125756E0038A185-AEDE89BC0DB5AD63C12574D60029194E-HullinSIGGRAPH08},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {The quality of a 3D range scan should not depend on the surface properties of the object. Most active range scanning techniques, however, assume a diffuse reflector to allow for a robust detection of incident light patterns. In our approach we embed the object into a fluorescent liquid. By analyzing the light rays that become visible due to fluorescence rather than analyzing their reflections off the surface, we can detect the intersection points between the projected laser sheet and the object surface for a wide range of different materials. For transparent objects we can even directly depict a slice through the object in just one image by matching its refractive index to the one of the embedding liquid. This enables a direct sampling of the object geometry without the need for computational reconstruction. This way, a high-resolution 3D volume can be assembled simply by sweeping a laser plane through the object. We demonstrate the effectiveness of our light sheet range scanning approach on a set of objects manufactured from a variety of materials and material mixes, including dark, translucent and transparent objects.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008},
  EDITOR    = {Turk, Greg},
  PAGES     = {87.1--87.10},
  JOURNAL   = {ACM Transactions on Graphics (Proc. SIGGRAPH)},
  VOLUME    = {27},
  ISSUE     = {3},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Hullin, Matthias B. %A Fuchs, Martin %A Ihrke, Ivo %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fluorescent Immersion Range Scanning : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BBA-4 %F EDOC: 428110 %R 10.1145/1360612.1360686 %U http://doi.acm.org/10.1145/1360612.1360686 %F OTHER: Local-ID: C125756E0038A185-AEDE89BC0DB5AD63C12574D60029194E-HullinSIGGRAPH08 %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, CA, USA %X The quality of a 3D range scan should not depend on the surface properties of <br>the object. Most active range scanning techniques, however, assume a diffuse <br>re&#64258;ector to allow for a robust detection of incident light patterns. In our <br>approach we embed the object into a &#64258;uorescent liquid. By analyzing the light <br>rays that become visible due to &#64258;uorescence rather than analyzing their <br>re&#64258;ections off the surface, we can detect the intersection points between the <br>projected laser sheet and the object surface for a wide range of different <br>materials. For transparent objects we can even directly depict a slice through <br>the object in just one image by matching its refractive index to the one of the <br>embedding liquid. This enables a direct sampling of the object geometry without <br>the need for computational reconstruction. 
This way, a high-resolution 3D <br>volume can be assembled simply by sweeping a laser plane through the object. We <br>demonstrate the effectiveness of our light sheet range scanning approach on a <br>set of objects manufactured from a variety of materials and material mixes, <br>including dark, translucent and transparent objects. %B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P 87.1 - 87.10 %I ACM %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
Hullin, M.B., Fuchs, M., Ihrke, I., Ajdin, B., Seidel, H.-P., and Lensch, H.P.A. 2008b. Direct Visualization of Real-world Light Transport. Vision, Modeling, and Visualization 2008 (VMV 2008), Akademische Verlagsgesellschaft Aka.
Abstract
Light transport in complex scenes with possibly intricate optical properties is difficult to grasp intuitively. The study of light transport has so far mainly been conducted by indirect observations. Cameras or human observers typically only sense the radiance reflected from a scene, i.e. the light directly emitted or reflected from the last bounce of a possibly much longer light path. Models for the propagation of light, however, typically assume light waves or rays, concepts which so far have been communicated in an abstract way using formulas or sketches. In this paper, we propose the use of fluorescent fluids for direct visualization of light transport in the real world. In the fluorescent fluid the traces of light become visible as a small fraction of the energy transported along the ray is scattered out towards the viewer. We demonstrate this visualization for direct illumination effects such as reflections and refractions at various surfaces, as well as for global effects such as subsurface light transport in translucent material, caustics, or interreflections. As this allows for the inspection of entire light paths, rather than the last scattering event, we believe that this novel visualization can help to intuitively explain the phenomena of light transport to students and experts alike.
Export
BibTeX
@inproceedings{HullinVMV2008,
  TITLE     = {Direct Visualization of Real-world Light Transport},
  AUTHOR    = {Hullin, Matthias B. and Fuchs, Martin and Ihrke, Ivo and Ajdin, Boris and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-3-89838-609-8},
  LOCALID   = {Local-ID: C125756E0038A185-39A3AED3CD668CC9C125755400580189-HullinVMV2008},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Light transport in complex scenes with possibly intricate optical properties is difficult to grasp intuitively. The study of light transport has so far mainly been conducted by indirect observations. Cameras or human observers typically only sense the radiance reflected from a scene, i.e. the light directly emitted or reflected from the last bounce of a possibly much longer light path. Models for the propagation of light, however, typically assume light waves or rays, concepts which so far have been communicated in an abstract way using formulas or sketches. In this paper, we propose the use of fluorescent fluids for direct visualization of light transport in the real world. In the fluorescent fluid the traces of light become visible as a small fraction of the energy transported along the ray is scattered out towards the viewer. We demonstrate this visualization for direct illumination effects such as reflections and refractions at various surfaces, as well as for global effects such as subsurface light transport in translucent material, caustics, or interreflections. As this allows for the inspection of entire light paths, rather than the last scattering event, we believe that this novel visualization can help to intuitively explain the phenomena of light transport to students and experts alike.},
  BOOKTITLE = {Vision, Modeling, and Visualization 2008 (VMV 2008)},
  EDITOR    = {Deussen, Oliver and Keim, Daniel and Saupe, Dietmar},
  PAGES     = {363--371},
  ADDRESS   = {Konstanz, Germany},
}
Endnote
%0 Conference Proceedings %A Hullin, Matthias B. %A Fuchs, Martin %A Ihrke, Ivo %A Ajdin, Boris %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Direct Visualization of Real-world Light Transport : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B6F-C %F EDOC: 428109 %F OTHER: Local-ID: C125756E0038A185-39A3AED3CD668CC9C125755400580189-HullinVMV2008 %D 2008 %B 13th International Fall Workshop Vision, Modeling, and Visualization 2008 %Z date of event: 2008-10-08 - 2008-10-10 %C Konstanz, Germany %X Light transport in complex scenes with possibly intricate optical properties is difficult to grasp intuitively. The study of light transport has so far mainly been conducted by indirect observations. Cameras or human observers typically only sense the radiance reflected from a scene, i.e. the light directly emitted or reflected from the last bounce of a possibly much longer light path. Models for the propagation of light, however, typically assume light waves or rays, concepts which so far have been communicated in an abstract way using formulas or sketches. In this paper, we propose the use of fluorescent fluids for direct visualization of light transport in the real world. In the fluorescent fluid the traces of light become visible as a small fraction of the energy transported along the ray is scattered out towards the viewer. 
We demonstrate this visualization for direct illumination effects such as reflections and refractions at various surfaces, as well as for global effects such as subsurface light transport in translucent material, caustics, or interreflections. As this allows for the inspection of entire light paths, rather than the last scattering event, we believe that this novel visualization can help to intuitively explain the phenomena of light transport to students and experts alike. %B Vision, Modeling, and Visualization 2008 %E Deussen, Oliver; Keim, Daniel; Saupe, Dietmar %P 363 - 371 %I Akademische Verlagsgesellschaft Aka %@ 978-3-89838-609-8
Herzog, R., Kinuwaki, S., Myszkowski, K., and Seidel, H.-P. 2008. Render2MPEG: A Perception-based Framework Towards Integrating Rendering and Video Compression. Computer Graphics Forum, Blackwell.
Abstract
Currently 3D animation rendering and video compression are completely independent processes even if rendered frames are streamed on-the-fly within a client-server platform. In such scenario, which may involve time-varying transmission bandwidths and different display characteristics at the client side, dynamic adjustment of the rendering quality to such requirements can lead to a better use of server resources. In this work, we present a framework where the renderer and MPEG codec are coupled through a straightforward interface that provides precise motion vectors from the rendering side to the codec and perceptual error thresholds for each pixel in the opposite direction. The perceptual error thresholds take into account bandwidth-dependent quantization errors resulting from the lossy compression as well as image content-dependent luminance and spatial contrast masking. The availability of the discrete cosine transform (DCT) coefficients at the codec side enables to use advanced models of the human visual system (HVS) in the perceptual error threshold derivation without incurring any significant cost. Those error thresholds are then used to control the rendering quality and make it well aligned with the compressed stream quality. In our prototype system we use the lightcuts technique developed by Walter et al., which we enhance to handle dynamic image sequences, and an MPEG-2 implementation. Our results clearly demonstrate many advantages of coupling the rendering with video compression in terms of faster rendering. Furthermore, temporally coherent rendering leads to a reduction of temporal artifacts.
Export
BibTeX
@inproceedings{Herzog-et-al_EUROGRAPHICS08,
  TITLE     = {{Render2MPEG}: A Perception-based Framework Towards Integrating Rendering and Video Compression},
  AUTHOR    = {Herzog, Robert and Kinuwaki, Shinichi and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  URL       = {http://dx.doi.org/10.1111/j.1467-8659.2008.01115.x},
  DOI       = {10.1111/j.1467-8659.2008.01115.x},
  LOCALID   = {Local-ID: C125756E0038A185-3B410E71DC037794C12574C5005576A5-Herzog08EG},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Currently 3D animation rendering and video compression are completely independent processes even if rendered frames are streamed on-the-fly within a client-server platform. In such scenario, which may involve time-varying transmission bandwidths and different display characteristics at the client side, dynamic adjustment of the rendering quality to such requirements can lead to a better use of server resources. In this work, we present a framework where the renderer and MPEG codec are coupled through a straightforward interface that provides precise motion vectors from the rendering side to the codec and perceptual error thresholds for each pixel in the opposite direction. The perceptual error thresholds take into account bandwidth-dependent quantization errors resulting from the lossy compression as well as image content-dependent luminance and spatial contrast masking. The availability of the discrete cosine transform (DCT) coefficients at the codec side enables to use advanced models of the human visual system (HVS) in the perceptual error threshold derivation without incurring any significant cost. Those error thresholds are then used to control the rendering quality and make it well aligned with the compressed stream quality. In our prototype system we use the lightcuts technique developed by Walter et al., which we enhance to handle dynamic image sequences, and an MPEG-2 implementation. Our results clearly demonstrate many advantages of coupling the rendering with video compression in terms of faster rendering. Furthermore, temporally coherent rendering leads to a reduction of temporal artifacts.},
  BOOKTITLE = {EUROGRAPHICS 2008},
  EDITOR    = {Drettakis, George and Scopigno, Roberto},
  PAGES     = {183--192},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {27},
  ISSUE     = {2},
  ADDRESS   = {Crete, Greece},
}
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Kinuwaki, Shinichi %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Render2MPEG: A Perception-based Framework Towards Integrating Rendering and Video Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CD2-8 %F EDOC: 428103 %R 10.1111/j.1467-8659.2008.01115.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01115.x %F OTHER: Local-ID: C125756E0038A185-3B410E71DC037794C12574C5005576A5-Herzog08EG %D 2008 %B 29th Annual Conference of the European Association for Computer Graphics %Z date of event: 2008-04-14 - 2008-04-18 %C Crete, Greece %X Currently 3D animation rendering and video compression are completely <br>independent processes even if rendered<br>frames are streamed on-the-fly within a client-server platform. In such <br>scenario, which may involve time-varying<br>transmission bandwidths and different display characteristics at the client <br>side, dynamic adjustment of the rendering<br>quality to such requirements can lead to a better use of server resources. In <br>this work, we present a framework where<br>the renderer and MPEG codec are coupled through a straightforward interface <br>that provides precise motion vectors<br>from the rendering side to the codec and perceptual error thresholds for each <br>pixel in the opposite direction. The<br>perceptual error thresholds take into account bandwidth-dependent quantization <br>errors resulting from the lossy compression<br>as well as image content-dependent luminance and spatial contrast masking. 
The <br>availability of the discrete<br>cosine transform (DCT) coefficients at the codec side enables to use advanced <br>models of the human visual system<br>(HVS) in the perceptual error threshold derivation without incurring any <br>significant cost. Those error thresholds<br>are then used to control the rendering quality and make it well aligned with <br>the compressed stream quality. In our<br>prototype system we use the lightcuts technique developed by Walter et al., <br>which we enhance to handle dynamic<br>image sequences, and an MPEG-2 implementation. Our results clearly demonstrate <br>many advantages of coupling<br>the rendering with video compression in terms of faster rendering. Furthermore, <br>temporally coherent rendering leads<br>to a reduction of temporal artifacts. %B EUROGRAPHICS 2008 %E Drettakis, George; Scopigno, Roberto %P 183 - 192 %I Blackwell %J Computer Graphics Forum %V 27 %N 2 %I Blackwell-Wiley %@ false
Hasler, N., Rosenhahn, B., and Seidel, H.-P. 2008. Cloth Simulation Based Motion Capture of Dressed Humans. In: Virtual Realities - Dagstuhl Seminar 2008. Springer, Berlin.
Export
BibTeX
@incollection{DBLP:conf/dagstuhl/HaslerRS08,
  TITLE     = {Cloth Simulation Based Motion Capture of Dressed Humans},
  AUTHOR    = {Hasler, Nils and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-211-99177-0},
  DOI       = {10.1007/978-3-211-99178-7_7},
  PUBLISHER = {Springer},
  ADDRESS   = {Berlin},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Virtual Realities -- Dagstuhl Seminar 2008},
  EDITOR    = {Coquillart, Sabine and Brunnett, Guido and Welch, Greg},
  PAGES     = {123--138},
}
Endnote
%0 Book Section %A Hasler, Nils %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Cloth Simulation Based Motion Capture of Dressed Humans : %G eng %U http://hdl.handle.net/21.11116/0000-000F-52C8-3 %R 10.1007/978-3-211-99178-7_7 %D 2008 %B Virtual Realities - Dagstuhl Seminar 2008 %E Coquillart, Sabine; Brunnett, Guido; Welch, Greg %P 123 - 138 %I Springer %C Berlin %@ 978-3-211-99177-0 %U https://rdcu.be/dITSM
Han, D., Rosenhahn, B., Gehrig, S., and Seidel, H.-P. 2008. Combined Registration Methods for Pose Estimation. Advances in Visual Computing (ISVC 2008), Springer.
Export
BibTeX
@inproceedings{Han-et-al_ISVC08,
  TITLE     = {Combined Registration Methods for Pose Estimation},
  AUTHOR    = {Han, Dong and Rosenhahn, Bodo and Gehrig, Stefan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-89638-8},
  URL       = {http://dx.doi.org/10.1007/978-3-540-89639-5_87},
  DOI       = {10.1007/978-3-540-89639-5_87},
  LOCALID   = {Local-ID: C125756E0038A185-0CE6F32A423D5F43C12575590042CED6-RosenhahnISVC2008},
  PUBLISHER = {Springer},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Advances in Visual Computing (ISVC 2008)},
  EDITOR    = {Bebis, G. and Boyle, R. D. and Parvin, B. and Koracin, D. and Remagnino, P. and Porikli, F. M. and Peters, J. and Klosowski, J. T. and Arns, L. L. and Chun, Y. K. and Rhyne, T.-M. and Monroe, L.},
  PAGES     = {913--924},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {5358},
  ADDRESS   = {Las Vegas, NV, USA},
}
Endnote
%0 Conference Proceedings %A Han, Dong %A Rosenhahn, Bodo %A Gehrig, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Combined Registration Methods for Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B37-8 %F EDOC: 428099 %R 10.1007/978-3-540-89639-5_87 %U http://dx.doi.org/10.1007/978-3-540-89639-5_87 %F OTHER: Local-ID: C125756E0038A185-0CE6F32A423D5F43C12575590042CED6-RosenhahnISVC2008 %D 2008 %B 4th International Symposium on Advances in Visual Computing %Z date of event: 2008-12-01 - 2008-12-03 %C Las Vegas, NV, USA %B Advances in Visual Computing %E Bebis, G.; Boyle, R.D.; Parvin, B.; Koracin, D.; Remagnino, P.; Porikli, F. M.; Peters, J.; Klosowski, J.T.; Arns, L.L.; Chun, Y.K.; Rhyne, T.-M.; Monroe, L. %P 913 - 924 %I Springer %@ 9738-3-540-89638-8 %B Lecture Notes in Computer Science %N 5358 %U https://rdcu.be/dITA6
Gross, M., Müller, H., Seidel, H.-P., and Shum, H., eds. 2008. Visual Computing - Convergence of Computer Graphics and Computer Vision. Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI).
Export
BibTeX
@proceedings{DBLP:conf/dagstuhl/2007P7171,
  TITLE     = {Visual Computing -- Convergence of Computer Graphics and Computer Vision},
  EDITOR    = {Gross, Markus and M{\"u}ller, Heinrich and Seidel, Hans-Peter and Shum, Harry},
  LANGUAGE  = {eng},
  PUBLISHER = {Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI)},
  YEAR      = {2007},
  DATE      = {2008},
  ADDRESS   = {Schloss Dagstuhl, Wadern, Germany},
}
Endnote
%0 Conference Proceedings %E Gross, Markus %E M&#252;ller, Heinrich %E Seidel, Hans-Peter %E Shum, Harry %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Visual Computing - Convergence of Computer Graphics and Computer Vision : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5237-7 %I Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI) %D 2008 %B Dagstuhl Seminar 07171: Visual Computing &#8211; Convergence of Computer Graphics and Computer Vision %Z date of event: 2007-04-22 - 2007-04-27 %D 2007 %C Schloss Dagstuhl, Wadern, Germany
Granados Velásquez, M.A., Seidel, H.-P., and Lensch, H.P.A. 2008. Background Estimation from Non-time Sequence Images. Graphics Interface 2008: proceedings, ACM Press.
Abstract
We address the problem of reconstructing the background of a scene from a set of photographs featuring several occluding objects. We assume that the photographs are obtained from the same viewpoint and under similar illumination conditions. Our approach is to define the background as a composite of the input photographs. Each possible composite is assigned a cost, and the resulting cost function is minimized. We penalize deviations from the following two model assumptions: background objects are stationary, and background objects are more likely to appear across the photographs. We approximate object stationariness using a motion boundary consistency term, and object likelihood using probability density estimates. The penalties are combined using an entropy-based weighting function. Furthermore, we constraint the solution space in order to avoid composites that cut through objects. The cost function is minimized using graph cuts, and the final result is composed using gradient domain fusion. We demonstrate the application of our method to the recovering of clean, unoccluded shots of crowded public places, as well as to the removal of ghosting artifacts in the reconstruction of high dynamic range images from multi-exposure sequences. Our contribution is the definition of an automatic method for consistent background estimation from multiple exposures featuring occluders, and its application to the problem of ghost removal in high dynamic range image reconstruction.
Export
BibTeX
@inproceedings{Granados2008,
  TITLE     = {Background Estimation from Non-time Sequence Images},
  AUTHOR    = {Granados Vel{\'a}squez, Miguel Andr{\'e}s and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-423-0},
  URL       = {http://www.mpi-inf.mpg.de/~granados/papers/granados08_bgestimation.pdf},
  DOI       = {10.1145/1375714.1375721},
  LOCALID   = {Local-ID: C125756E0038A185-67F1775FE220D8F7C125754B0055108E-Granados2008},
  PUBLISHER = {ACM Press},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {We address the problem of reconstructing the background of a scene from a set of photographs featuring several occluding objects. We assume that the photographs are obtained from the same viewpoint and under similar illumination conditions. Our approach is to define the background as a composite of the input photographs. Each possible composite is assigned a cost, and the resulting cost function is minimized. We penalize deviations from the following two model assumptions: background objects are stationary, and background objects are more likely to appear across the photographs. We approximate object stationariness using a motion boundary consistency term, and object likelihood using probability density estimates. The penalties are combined using an entropy-based weighting function. Furthermore, we constraint the solution space in order to avoid composites that cut through objects. The cost function is minimized using graph cuts, and the final result is composed using gradient domain fusion. We demonstrate the application of our method to the recovering of clean, unoccluded shots of crowded public places, as well as to the removal of ghosting artifacts in the reconstruction of high dynamic range images from multi-exposure sequences. Our contribution is the definition of an automatic method for consistent background estimation from multiple exposures featuring occluders, and its application to the problem of ghost removal in high dynamic range image reconstruction.},
  BOOKTITLE = {Graphics Interface 2008 : proceedings},
  EDITOR    = {Shaw, Chris and Bartram, Lyn},
  PAGES     = {33--40},
  SERIES    = {ACM International Conference Proceeding Series},
  ADDRESS   = {Windsor, Ontario, Canada},
}
Endnote
%0 Conference Proceedings %A Granados Vel&#225;squez, Miguel Andr&#233;s %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Graphics Interface : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BD5-6 %F EDOC: 428095 %R 10.1145/1375714.1375721 %U http://www.mpi-inf.mpg.de/~granados/papers/granados08_bgestimation.pdf %F OTHER: Local-ID: C125756E0038A185-67F1775FE220D8F7C125754B0055108E-Granados2008 %D 2008 %B Untitled Event %Z date of event: 2008-05-28 - 2008-05-30 %C Windsor, Ontario, Canada %X We address the problem of reconstructing the background of<br> a scene from a set of photographs featuring several occluding<br> objects. We assume that the photographs are obtained from the same<br> viewpoint and under similar illumination conditions. Our approach<br> is to define the background as a composite of the input photographs.<br> Each possible composite is assigned a cost, and the resulting cost<br> function is minimized. We penalize deviations from the following<br> two model assumptions: background objects are stationary, and<br> background objects are more likely to appear across the photographs.<br> We approximate object stationariness using a motion boundary<br> consistency term, and object likelihood using probability density<br> estimates. The penalties are combined using an entropy-based<br> weighting function. Furthermore, we constraint the solution space<br> in order to avoid composites that cut through objects. 
The cost<br> function is minimized using graph cuts, and the final result is<br> composed using gradient domain fusion.<br> We demonstrate the application of our method to the recovering of<br> clean, unoccluded shots of crowded public places, as well as to the<br> removal of ghosting artifacts in the reconstruction of high dynamic<br> range images from multi-exposure sequences. Our contribution is the<br> definition of an automatic method for consistent background<br> estimation from multiple exposures featuring occluders, and its<br> application to the problem of ghost removal in high dynamic range<br> image reconstruction. %B Graphics Interface 2008 : proceedings %E Shaw, Chris; Bartram, Lyn %P 33 - 40 %I ACM Press %@ 978-1-56881-423-0 %B ACM International Conference Proceeding Series
Granados, M., Seidel, H.-P., and Lensch, H.P.A. 2008. Background Estimation from Non-time Sequence Images. Proceedings of the Graphics Interface 2008 (GI 2008), Canadian Information Processing Society.
Export
BibTeX
@inproceedings{DBLP:conf/graphicsinterface/GranadosSL08,
  TITLE     = {Background Estimation from Non-time Sequence Images},
  AUTHOR    = {Granados, Miguel and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-423-0},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of the Graphics Interface 2008 (GI 2008)},
  EDITOR    = {Shaw, Chris and Bartram, Lyn},
  PAGES     = {33--40},
  SERIES    = {ACM International Conference Proceeding Series},
  ADDRESS   = {Windsor, Ontario, Canada},
}
Endnote
%0 Conference Proceedings %A Granados, Miguel %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Background Estimation from Non-time Sequence Images : %G eng %U http://hdl.handle.net/21.11116/0000-000F-52BF-E %D 2008 %B Graphics Interface 2008 %Z date of event: 2008-05-28 - 2008-05-30 %C Windsor, Ontario, Canada %B Proceedings of the Graphics Interface 2008 %E Shaw, Chris; Bartram, Lyn %P 33 - 40 %I Canadian Information Processing Society %@ 978-1-56881-423-0 %B ACM International Conference Proceeding Series
Gall, J., Rosenhahn, B., Gehrig, S., and Seidel, H.-P. 2008a. Model-based Motion Capture for Crash Test Video Analysis. Pattern Recognition, Springer.
Abstract
In this work, we propose a model-based approach for estimating the 3D position and orientation of a dummy's head for crash test video analysis. Instead of relying on photogrammetric markers which provide only sparse 3D measurements, features present in the texture of the object's surface are used for tracking. In order to handle also small and partially occluded objects, the concepts of region-based and patch-based matching are combined for pose estimation. For a qualitative and quantitative evaluation, the proposed method is applied to two multi-view crash test videos captured by high-speed cameras.
Export
BibTeX
@inproceedings{Gall-et-al_DAGM08,
  TITLE     = {Model-based Motion Capture for Crash Test Video Analysis},
  AUTHOR    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Gehrig, Stefan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-69320-8},
  URL       = {http://www.mpi-inf.mpg.de/~jgall/download/jgall_crash_dagm08.pdf},
  DOI       = {10.1007/978-3-540-69321-5_10},
  LOCALID   = {Local-ID: C125756E0038A185-056686D45C3BF4B4C1257554004F1C84-Gall2008b},
  PUBLISHER = {Springer},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {In this work, we propose a model-based approach for estimating the 3D position <br>and orientation of a dummy's head for crash test video analysis. Instead of <br>relying on photogrammetric markers which provide only sparse 3D measurements, <br>features present in the texture of the object's surface are used for tracking. <br>In order to handle also small and partially occluded objects, the concepts of <br>region-based and patch-based matching are combined for pose estimation. For a <br>qualitative and quantitative evaluation, the proposed method is applied to two <br>multi-view crash test videos captured by high-speed cameras.},
  BOOKTITLE = {Pattern Recognition},
  EDITOR    = {Rigoll, Gerhard},
  PAGES     = {92--101},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {5096},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Gehrig, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Model-based Motion Capture for Crash Test Video Analysis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C3D-7 %F EDOC: 428086 %R 10.1007/978-3-540-69321-5_10 %U http://www.mpi-inf.mpg.de/~jgall/download/jgall_crash_dagm08.pdf %F OTHER: Local-ID: C125756E0038A185-056686D45C3BF4B4C1257554004F1C84-Gall2008b %D 2008 %B 30th DAGM Symposium on Pattern Recognition %Z date of event: 2008-06-10 - 2008-06-13 %C Munich, Germany %X In this work, we propose a model-based approach for estimating the 3D position <br>and orientation of a dummy's head for crash test video analysis. Instead of <br>relying on photogrammetric markers which provide only sparse 3D measurements, <br>features present in the texture of the object's surface are used for tracking. <br>In order to handle also small and partially occluded objects, the concepts of <br>region-based and patch-based matching are combined for pose estimation. For a <br>qualitative and quantitative evaluation, the proposed method is applied to two <br>multi-view crash test videos captured by high-speed cameras. %B Pattern Recognition %E Rigoll, Gerhard %P 92 - 101 %I Springer %@ 978-3-540-69320-8 %B Lecture Notes in Computer Science %N 5096 %U https://rdcu.be/dI55s
Gall, J., Rosenhahn, B., and Seidel, H.-P. 2008b. An Introduction to Interacting Simulated Annealing. In: Human Motion - Understanding, Modeling, Capture, and Animation. Springer, Dordrecht.
Abstract
Human motion capturing can be regarded as an optimization problem where one searches for the pose that minimizes a previously defined error function based on some image features. Most approaches for solving this problem use iterative methods like gradient descent approaches. They work quite well as long as they do not get distracted by local optima. We introduce a novel approach for global optimization that is suitable for the tasks as they occur during human motion capturing. We call the method interacting simulated annealing since it is based on an interacting particle system that converges to the global optimum similar to simulated annealing. We provide a detailed mathematical discussion that includes convergence results and annealing properties. Moreover, we give two examples that demonstrate possible applications of the algorithm, namely a global optimization problem and a multi-view human motion capturing task including segmentation, prediction, and prior knowledge. A quantitative error analysis also indicates the performance and the robustness of the interacting simulated annealing algorithm.
Export
BibTeX
@incollection{Gall2007b,
  TITLE     = {An Introduction to Interacting Simulated Annealing},
  AUTHOR    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4020-6692-4},
  URL       = {http://dx.doi.org/10.1007/978-1-4020-6693-1_13},
  DOI       = {10.1007/978-1-4020-6693-1_13},
  LOCALID   = {Local-ID: C125756E0038A185-93709ED052577626C12572910046D5D2-Gall2007b},
  PUBLISHER = {Springer},
  ADDRESS   = {Dordrecht},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Human motion capturing can be regarded as an optimization problem where one searches for the pose that minimizes a previously defined error function based on some image features. Most approaches for solving this problem use iterative methods like gradient descent approaches. They work quite well as long as they do not get distracted by local optima. We introduce a novel approach for global optimization that is suitable for the tasks as they occur during human motion capturing. We call the method interacting simulated annealing since it is based on an interacting particle system that converges to the global optimum similar to simulated annealing. We provide a detailed mathematical discussion that includes convergence results and annealing properties. Moreover, we give two examples that demonstrate possible applications of the algorithm, namely a global optimization problem and a multi-view human motion capturing task including segmentation, prediction, and prior knowledge. A quantative error analysis also indicates the performance and the robustness of the interacting simulated annealing algorithm.},
  BOOKTITLE = {Human Motion -- Understanding, Modeling, Capture, and Animation},
  EDITOR    = {Rosenhahn, Bodo and Klette, Reinhard and Metaxas, Dimitris},
  PAGES     = {319--345},
  SERIES    = {Computational Imaging and Vision},
  VOLUME    = {36},
}
Endnote
%0 Book Section %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Introduction to Interacting Simulated Annealing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AE9-2 %F EDOC: 428088 %U http://dx.doi.org/10.1007/978-1-4020-6693-1_13 %F OTHER: Local-ID: C125756E0038A185-93709ED052577626C12572910046D5D2-Gall2007b %I Springer %C Dordrecht %D 2008 %X Human motion capturing can be regarded as an optimization problem where one searches for the pose that minimizes a previously defined error function based on some image features. Most approaches for solving this problem use iterative methods like gradient descent approaches. They work quite well as long as they do not get distracted by local optima. We introduce a novel approach for global optimization that is suitable for the tasks as they occur during human motion capturing. We call the method interacting simulated annealing since it is based on an interacting particle system that converges to the global optimum similar to simulated annealing. We provide a detailed mathematical discussion that includes convergence results and annealing properties. Moreover, we give two examples that demonstrate possible applications of the algorithm, namely a global optimization problem and a multi-view human motion capturing task including segmentation, prediction, and prior knowledge. A quantative error analysis also indicates the performance and the robustness of the interacting simulated annealing algorithm. %B Human Motion - Understanding, Modeling, Capture, and Animation %E Rosenhahn, Bodo; Klette, Reinhard; Metaxas, Dimitris %P 319 - 345 %I Springer %C Dordrecht %@ 978-1-4020-6692-4 %S Computational Imaging and Vision %N 36
Gall, J., Rosenhahn, B., and Seidel, H.-P. 2008c. Drift-free Tracking of Rigid and Articulated Objects. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008), IEEE Computer Society.
Abstract
Model-based 3D trackers estimate the position, rotation, and joint angles of a given model from video data of one or multiple cameras. They often rely on image features that are tracked over time but the accumulation of small errors results in a drift away from the target object. In this work, we address the drift problem for the challenging task of human motion capture and tracking in the presence of multiple moving objects where the error accumulation becomes even more problematic due to occlusions. To this end, we propose an analysis-by-synthesis framework for articulated models. It combines the complementary concepts of patch-based and region-based matching to track both structured and homogeneous body parts. The performance of our method is demonstrated for rigid bodies, body parts, and full human bodies where the sequences contain fast movements, self-occlusions, multiple moving objects, and clutter. We also provide a quantitative error analysis and comparison with other model-based approaches.
Export
BibTeX
@inproceedings{Gall-et-al_CVPR08,
  TITLE     = {Drift-free Tracking of Rigid and Articulated Objects},
  AUTHOR    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-2242-5},
  URL       = {http://www.mpi-inf.mpg.de/~jgall/download/jgall_drift_cvpr08.pdf},
  DOI       = {10.1109/CVPR.2008.4587558},
  LOCALID   = {Local-ID: C125756E0038A185-8077AF199AACEB2BC1257554004E6ADD-Gall2008},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Model-based 3D tracker estimate the position, rotation, and joint angles of a <br>given model from video data of one or multiple cameras. They often rely on <br>image features that are tracked over time but the accumulation of small errors <br>results in a drift away from the target object.<br>In this work, we address the drift problem for the challenging task of human <br>motion capture and tracking in the presence of multiple moving objects where <br>the error accumulation becomes even more problematic due to occlusions. To this <br>end, we propose an analysis-by-synthesis framework for articulated models. It <br>combines the complementary concepts of patch-based and region-based matching to <br>track both structured and homogeneous body parts. The performance of our method <br>is demonstrated for rigid bodies, body parts, and full human bodies where the <br>sequences contain fast movements, self-occlusions, multiple moving objects, and <br>clutter. We also provide a quantitative error analysis and comparison with <br>other model-based approaches.},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008)},
  PAGES     = {1--8},
  ADDRESS   = {Anchorage, AK, USA},
}
Endnote
%0 Conference Proceedings %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Drift-free Tracking of Rigid and Articulated Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B73-F %F EDOC: 428087 %R 10.1109/CVPR.2008.4587558 %U http://www.mpi-inf.mpg.de/~jgall/download/jgall_drift_cvpr08.pdf %F OTHER: Local-ID: C125756E0038A185-8077AF199AACEB2BC1257554004E6ADD-Gall2008 %D 2008 %B 2008 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2008-06-23 - 2008-06-28 %C Anchorage, AK, USA %X Model-based 3D tracker estimate the position, rotation, and joint angles of a <br>given model from video data of one or multiple cameras. They often rely on <br>image features that are tracked over time but the accumulation of small errors <br>results in a drift away from the target object.<br>In this work, we address the drift problem for the challenging task of human <br>motion capture and tracking in the presence of multiple moving objects where <br>the error accumulation becomes even more problematic due to occlusions. To this <br>end, we propose an analysis-by-synthesis framework for articulated models. It <br>combines the complementary concepts of patch-based and region-based matching to <br>track both structured and homogeneous body parts. The performance of our method <br>is demonstrated for rigid bodies, body parts, and full human bodies where the <br>sequences contain fast movements, self-occlusions, multiple moving objects, and <br>clutter. We also provide a quantitative error analysis and comparison with <br>other model-based approaches. %B IEEE Conference on Computer Vision and Pattern Recognition %P 1 - 8 %I IEEE Computer Society %@ 978-1-4244-2242-5
Galic, I., Weickert, J., Welk, M., Bruhn, A., Belyaev, A., and Seidel, H.-P. 2008. Image Compression with Anisotropic Diffusion. Journal of Mathematical Imaging and Vision 31, 2/3.
Export
BibTeX
@article{Galic-et-al_JMIV08,
  TITLE     = {Image Compression with Anisotropic Diffusion},
  AUTHOR    = {Galic, Irene and Weickert, Joachim and Welk, Martin and Bruhn, Andr{\'e}s and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0924-9907},
  URL       = {http://dx.doi.org/10.1007/s10851-008-0087-0},
  DOI       = {10.1007/s10851-008-0087-0},
  LOCALID   = {Local-ID: C125756E0038A185-4D6F7201EBC61DCCC1257578003011AD-Galic2008},
  PUBLISHER = {Kluwer Academic Publishers},
  ADDRESS   = {Dordrecht, Holland},
  YEAR      = {2008},
  DATE      = {2008},
  JOURNAL   = {Journal of Mathematical Imaging and Vision},
  VOLUME    = {31},
  NUMBER    = {2/3},
  PAGES     = {255--269},
}
Endnote
%0 Journal Article %A Galic, Irene %A Weickert, Joachim %A Welk, Martin %A Bruhn, Andr&#233;s %A Belyaev, Alexander %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image Compression with Anisotropic Diffusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BE6-F %F EDOC: 428085 %R 10.1007/s10851-008-0087-0 %U http://dx.doi.org/10.1007/s10851-008-0087-0 %F OTHER: Local-ID: C125756E0038A185-4D6F7201EBC61DCCC1257578003011AD-Galic2008 %D 2008 %* Review method: peer-reviewed %J Journal of Mathematical Imaging and Vision %V 31 %N 2/3 %& 255 %P 255 - 269 %I Kluwer Academic Publishers %C Dordrecht, Holland %@ false %U https://rdcu.be/dJdtK
Fuchs, M., Raskar, R., Seidel, H.-P., and Lensch, H.P.A. 2008a. Towards Passive 6D Reflectance Field Displays. ACM Transactions on Graphics, ACM.
Abstract
Traditional flat screen displays present 2D images. 3D and 4D displays have been proposed making use of lenslet arrays to shape a fixed outgoing light field for horizontal or bidirectional parallax. In this article, we present different designs of multi-dimensional displays which passively react to the light of the environment behind. The prototypes physically implement a reflectance field and generate different light fields depending on the incident illumination, for example light falling through a window. We discretize the incident light field using an optical system, and modulate it with a 2D pattern, creating a flat display which is view and illumination-dependent. It is free from electronic components. For distant light and a fixed observer position, we demonstrate a passive optical configuration which directly renders a 4D reflectance field in the real-world illumination behind it. We further propose an optical setup that allows for projecting out different angular distributions depending on the incident light direction. Combining multiple of these devices we build a display that renders a 6D experience, where the incident 2D illumination influences the outgoing light field, both in the spatial and in the angular domain. Possible applications of this technology are time-dependent displays driven by sunlight, object virtualization and programmable light benders / ray blockers without moving parts.
Export
BibTeX
@inproceedings{Fuchs-et-al_SIGGRAPH08,
  TITLE     = {Towards Passive {6D} Reflectance Field Displays},
  AUTHOR    = {Fuchs, Martin and Raskar, Ramesh and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-0112-1},
  URL       = {http://doi.acm.org/10.1145/1360612.1360657},
  DOI       = {10.1145/1360612.1360657},
  LOCALID   = {Local-ID: C125756E0038A185-8486275ACDC3517DC12575210049DD76-Fuchs_2007_TPRFD},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Traditional flat screen displays present 2D images. 3D and 4D<br> displays have been proposed making use of lenslet arrays to<br> shape a fixed outgoing light field for horizontal or<br> bidirectional parallax. In this article, we present different<br> designs of multi-dimensional displays which passively react to<br> the light of the environment behind. The prototypes physically<br> implement a reflectance field and generate different light<br> fields depending on the incident illumination, for example<br> light falling through a window.<br><br> We discretize the incident light field using an optical system,<br> and modulate it with a 2D pattern, creating a flat display<br> which is view \emph{and} illumination-dependent. It is free<br> from electronic components. For distant light and a fixed<br> observer position, we demonstrate a passive optical<br> configuration which directly renders a 4D reflectance field in<br> the real-world illumination behind it. We further propose an<br> optical setup that allows for projecting out different angular<br> distributions depending on the incident light direction.<br> Combining multiple of these devices we build a display that<br> renders a 6D experience, where the incident 2D illumination<br> influences the outgoing light field, both in the spatial and in<br> the angular domain. Possible applications of this technology<br> are time-dependent displays driven by sunlight, object<br> virtualization and programmable light benders / ray blockers<br> without moving parts.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008},
  EDITOR    = {Turk, Greg},
  PAGES     = {58.1--58.8},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {27},
  ISSUE     = {3},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Fuchs, Martin %A Raskar, Ramesh %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards Passive 6D Reflectance Field Displays : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D43-F %F EDOC: 428083 %R 10.1145/1360612.1360657 %U http://doi.acm.org/10.1145/1360612.1360657 %F OTHER: Local-ID: C125756E0038A185-8486275ACDC3517DC12575210049DD76-Fuchs_2007_TPRFD %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, CA, USA %X Traditional flat screen displays present 2D images. 3D and 4D<br> displays have been proposed making use of lenslet arrays to<br> shape a fixed outgoing light field for horizontal or<br> bidirectional parallax. In this article, we present different<br> designs of multi-dimensional displays which passively react to<br> the light of the environment behind. The prototypes physically<br> implement a reflectance field and generate different light<br> fields depending on the incident illumination, for example<br> light falling through a window.<br><br> We discretize the incident light field using an optical system,<br> and modulate it with a 2D pattern, creating a flat display<br> which is view \emph{and} illumination-dependent. It is free<br> from electronic components. For distant light and a fixed<br> observer position, we demonstrate a passive optical<br> configuration which directly renders a 4D reflectance field in<br> the real-world illumination behind it. 
We further propose an<br> optical setup that allows for projecting out different angular<br> distributions depending on the incident light direction.<br> Combining multiple of these devices we build a display that<br> renders a 6D experience, where the incident 2D illumination<br> influences the outgoing light field, both in the spatial and in<br> the angular domain. Possible applications of this technology<br> are time-dependent displays driven by sunlight, object<br> virtualization and programmable light benders / ray blockers<br> without moving parts. %B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P 58.1 - 58.8 %I ACM %@ 978-1-4503-0112-1 %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
Fuchs, C., Heinz, M., Seidel, H.-P., Lensch, H.P.A., and Levoy, M. 2008b. Combining Confocal Imaging and Descattering. Computer Graphics Forum (Proc. EGSR 2008), Blackwell.
Export
BibTeX
@inproceedings{Fuchs-et-al_EGSR08,
  TITLE     = {Combining Confocal Imaging and Descattering},
  AUTHOR    = {Fuchs, Christian and Heinz, Michael and Seidel, Hans-Peter and Lensch, Hendrik P. A. and Levoy, Marc},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  URL       = {http://www.mpi-inf.mpg.de/~cfuchs/Fuchs-2008-CCD.pdf},
  DOI       = {10.1111/j.1467-8659.2008.01263.x},
  LOCALID   = {Local-ID: C125756E0038A185-56003BE564C38814C125755C002B4A3A-Fuchs:2008:CCD},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {EGSR '08: Proceedings of the Nineteenth Eurographics Conference on Rendering},
  EDITOR    = {Marschner, Steve and Wimmer, Michael},
  PAGES     = {1245--1253},
  JOURNAL   = {Computer Graphics Forum (Proc. EGSR)},
  VOLUME    = {27},
  ISSUE     = {4},
  ADDRESS   = {Sarajevo, Bosnia and Herzegovina},
}
Endnote
%0 Conference Proceedings %A Fuchs, Christian %A Heinz, Michael %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %A Levoy, Marc %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Combining Confocal Imaging and Descattering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B3A-2 %F EDOC: 428082 %U http://www.mpi-inf.mpg.de/~cfuchs/Fuchs-2008-CCD.pdf %F OTHER: Local-ID: C125756E0038A185-56003BE564C38814C125755C002B4A3A-Fuchs:2008:CCD %R 10.1111/j.1467-8659.2008.01263.x %D 2008 %B Nineteenth Eurographics Conference on Rendering %Z date of event: 2008-06-23 - 2008-06-25 %C Sarajevo, Bosnia and Herzegovina %B EGSR '08: Proceedings of the Nineteenth Eurographics Conference on Rendering %E Marschner, Steve; Wimmer, Michael %P 1245 - 1253 %I Blackwell %J Computer Graphics Forum %V 27 %N 4 %I Blackwell-Wiley %@ false
Didyk, P., Mantiuk, R., Hein, M., and Seidel, H.-P. 2008. Enhancement of Bright Video Features for HDR Displays. Computer Graphics Forum (Proc. EGSR 2008), Blackwell.
Abstract
To utilize the full potential of new high dynamic range (HDR) displays, a system for the enhancement of bright luminous objects in video sequences is proposed. The system classifies clipped (saturated) regions as lights, reflections or diffuse surfaces using a semi-automatic classifier and then enhances each class of objects with respect to its relative brightness. The enhancement algorithm can significantly stretch the contrast of clipped regions while avoiding amplification of noise and contouring. We demonstrate that the enhanced video is strongly preferred to non-enhanced video, and it compares favorably to other methods.
Export
BibTeX
@inproceedings{Didyk-et-al_EGSR08,
  TITLE     = {Enhancement of Bright Video Features for {HDR} Displays},
  AUTHOR    = {Didyk, Piotr and Mantiuk, Rafa{\l} and Hein, Matthias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  URL       = {http://dx.doi.org/10.1111/j.1467-8659.2008.01265.x},
  DOI       = {10.1111/j.1467-8659.2008.01265.x},
  LOCALID   = {Local-ID: C125756E0038A185-C4C021EEB290A062C1257465005B05E8-didyk2008ebvfhdrd},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {To utilize the full potential of new high dynamic range (HDR)<br> displays, a system for the enhancement of bright luminous objects in<br> video sequences is proposed. The system classifies clipped<br> (saturated) regions as lights, reflections or diffuse surfaces using<br> a semi-automatic classifier and then enhances each class of objects<br> with respect to its relative brightness. The enhancement algorithm<br> can significantly stretch the contrast of clipped regions while<br> avoiding amplification of noise and contouring. We demonstrate that<br> the enhanced video is strongly preferred to non-enhanced video, and<br> it compares favorably to other methods.},
  BOOKTITLE = {EGSR '08: Proceedings of the Nineteenth Eurographics Conference on Rendering},
  EDITOR    = {Marschner, Steve and Wimmer, Michael},
  PAGES     = {1265--1274},
  JOURNAL   = {Computer Graphics Forum (Proc. EGSR)},
  VOLUME    = {27},
  ISSUE     = {4},
  ADDRESS   = {Sarajevo, Bosnia and Herzegovina},
}
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Mantiuk, Rafa&#322; %A Hein, Matthias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Enhancement of Bright Video Features for HDR Displays : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B87-3 %F EDOC: 428036 %R 10.1111/j.1467-8659.2008.01265.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01265.x %F OTHER: Local-ID: C125756E0038A185-C4C021EEB290A062C1257465005B05E8-didyk2008ebvfhdrd %D 2008 %B Nineteenth Eurographics Conference on Rendering %Z date of event: 2008-06-23 - 2008-06-25 %C Sarajevo, Bosnia and Herzegovina %X To utilize the full potential of new high dynamic range (HDR)<br> displays, a system for the enhancement of bright luminous objects in<br> video sequences is proposed. The system classifies clipped<br> (saturated) regions as lights, reflections or diffuse surfaces using<br> a semi-automatic classifier and then enhances each class of objects<br> with respect to its relative brightness. The enhancement algorithm<br> can significantly stretch the contrast of clipped regions while<br> avoiding amplification of noise and contouring. We demonstrate that<br> the enhanced video is strongly preferred to non-enhanced video, and<br> it compares favorably to other methods. %B EGSR '08: Proceedings of the Nineteenth Eurographics Conference on Rendering %E Marschner, Steve; Wimmer, Michael %P 1265 - 1274 %I Blackwell %J Computer Graphics Forum %V 27 %N 4 %I Blackwell-Wiley %@ false
De Aguiar, E., Theobalt, C., Stoll, C., Ahmed, N., Seidel, H.-P., and Thrun, S. 2008a. Performance Capture from Sparse Multi-view Video. ACM Transactions on Graphics (Proc. SIGGRAPH 2008), ACM.
Abstract
This paper proposes a new marker-less approach to capturing human performances from multi-view video. Our algorithm can jointly reconstruct spatio-temporally coherent geometry, motion and textural surface appearance of actors that perform complex and rapid moves. Furthermore, since our algorithm is purely meshbased and makes as few as possible prior assumptions about the type of subject being tracked, it can even capture performances of people wearing wide apparel, such as a dancer wearing a skirt. To serve this purpose our method efficiently and effectively combines the power of surface- and volume-based shape deformation techniques with a new mesh-based analysis-through-synthesis framework. This framework extracts motion constraints from video and makes the laser-scan of the tracked subject mimic the recorded performance. Also small-scale time-varying shape detail is recovered by applying model-guided multi-view stereo to refine the model surface. Our method delivers captured performance data at higher level of detail, is highly versatile, and is applicable to many complex types of scenes that could not be handled by alternative marker-based or marker-free recording techniques.
Export
BibTeX
@inproceedings{Theobalt-et-al_SIGGRAPH08,
  TITLE     = {Performance Capture from Sparse Multi-view Video},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Stoll, Carsten and Ahmed, Naveed and Seidel, Hans-Peter and Thrun, Sebastian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-0112-1},
  URL       = {http://doi.acm.org/10.1145/1360612.1360697},
  DOI       = {10.1145/1360612.1360697},
  LOCALID   = {Local-ID: C125756E0038A185-2E696CF3894EE2D9C125755400496998-deAguiar2008},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {This paper proposes a new marker-less approach to capturing human performances from multi-view video. Our algorithm can jointly reconstruct spatio-temporally coherent geometry, motion and textural surface appearance of actors that perform complex and rapid moves. Furthermore, since our algorithm is purely meshbased and makes as few as possible prior assumptions about the type of subject being tracked, it can even capture performances of people wearing wide apparel, such as a dancer wearing a skirt. To serve this purpose our method efficiently and effectively combines the power of surface- and volume-based shape deformation techniques with a new mesh-based analysis-through-synthesis framework. This framework extracts motion constraints from video and makes the laser-scan of the tracked subject mimic the recorded performance. Also small-scale time-varying shape detail is recovered by applying model-guided multi-view stereo to refine the model surface. Our method delivers captured performance data at higher level of detail, is highly versatile, and is applicable to many complex types of scenes that could not be handled by alternative marker-based or marker-free recording techniques.},
  BOOKTITLE = {SIGGRAPH '08: ACM SIGGRAPH 2008 papers},
  EDITOR    = {Turk, Greg},
  PAGES     = {98:1--98:10},
  JOURNAL   = {ACM Transactions on Graphics (Proc. SIGGRAPH)},
  VOLUME    = {27},
  ISSUE     = {3},
  ADDRESS   = {Los Angeles, USA},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Stoll, Carsten %A Ahmed, Naveed %A Seidel, Hans-Peter %A Thrun, Sebastian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Performance Capture from Sparse Multi-view Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CA4-0 %F EDOC: 428027 %R 10.1145/1360612.1360697 %U http://doi.acm.org/10.1145/1360612.1360697 %F OTHER: Local-ID: C125756E0038A185-2E696CF3894EE2D9C125755400496998-deAguiar2008 %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, USA %X This paper proposes a new marker-less approach to capturing human performances <br>from multi-view video. Our algorithm can jointly reconstruct spatio-temporally <br>coherent geometry, motion and textural surface appearance of actors that <br>perform complex and rapid moves. Furthermore, since our algorithm is purely <br>meshbased and makes as few as possible prior assumptions about the type of <br>subject being tracked, it can even capture performances of people wearing wide <br>apparel, such as a dancer wearing a skirt. To serve this purpose our method <br>efficiently and effectively combines the power of surface- and volume-based <br>shape deformation techniques with a new mesh-based analysis-through-synthesis <br>framework. This framework extracts motion constraints from video and makes the <br>laser-scan of the tracked subject mimic the recorded performance. Also <br>small-scale time-varying shape detail is recovered by applying model-guided <br>multi-view stereo to refine the model surface. 
Our method delivers captured <br>performance data at higher level of detail, is highly versatile, and is <br>applicable to many complex types of scenes that could not be handled by <br>alternative marker-based or marker-free recording techniques. %B SIGGRAPH '08: ACM SIGGRAPH 2008 papers %E Turk, Greg %P 98:1 - 98:10 %I ACM %@ 978-1-4503-0112-1 %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
De Aguiar, E., Theobalt, C., Thrun, S., and Seidel, H.-P. 2008b. Automatic Conversion of Mesh Animations into Skeleton-based Animations. Computer Graphics Forum, Blackwell.
Export
BibTeX
@inproceedings{Theobalt-et-al_SIGGRAPH08.2,
  TITLE     = {Automatic Conversion of Mesh Animations into Skeleton-based Animations},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Thrun, Sebastian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  URL       = {http://dx.doi.org/10.1111/j.1467-8659.2008.01136.x},
  DOI       = {10.1111/j.1467-8659.2008.01136.x},
  LOCALID   = {Local-ID: C125756E0038A185-35BFCCFEB0EC4B11C1257576003680D0-deAguiar2008z},
  PUBLISHER = {Blackwell},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {EUROGRAPHICS 2008},
  EDITOR    = {Drettakis, George and Scopigno, Roberto},
  PAGES     = {389--397},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {27},
  ISSUE     = {2},
  ADDRESS   = {Crete, Greece},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Thrun, Sebastian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Conversion of Mesh Animations into Skeleton-based Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B1B-8 %F EDOC: 428028 %R 10.1111/j.1467-8659.2008.01136.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01136.x %F OTHER: Local-ID: C125756E0038A185-35BFCCFEB0EC4B11C1257576003680D0-deAguiar2008z %D 2008 %B 29th Annual Conference of the European Association for Computer Graphics %Z date of event: 2008-04-14 - 2008-04-18 %C Crete, Greece %B EUROGRAPHICS 2008 %E Drettakis, George; Scopigno, Roberto %P 389 - 397 %I Blackwell %J Computer Graphics Forum %V 27 %N 2 %I Blackwell-Wiley %@ false
Chen, T., Seidel, H.-P., and Lensch, H.P.A. 2008. Modulated Phase-shifting for 3D Scanning. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008), IEEE Computer Society.
Export
BibTeX
@inproceedings{Chen-et-al_CVPR08,
  TITLE     = {Modulated Phase-shifting for {3D} Scanning},
  AUTHOR    = {Chen, Tongbo and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-2242-5},
  URL       = {http://dx.doi.org/10.1109/CVPR.2008.4587836},
  DOI       = {10.1109/CVPR.2008.4587836},
  LOCALID   = {Local-ID: C125756E0038A185-24DF44B1A223D7FCC1257558003F356A-Chen:CVPR2008},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008)},
  PAGES     = {1--8},
  ADDRESS   = {Anchorage, AK, USA},
}
Endnote
%0 Conference Proceedings %A Chen, Tongbo %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Modulated Phase-shifting for 3D Scanning : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C47-F %F EDOC: 428013 %R 10.1109/CVPR.2008.4587836 %U http://dx.doi.org/10.1109/CVPR.2008.4587836 %F OTHER: Local-ID: C125756E0038A185-24DF44B1A223D7FCC1257558003F356A-Chen:CVPR2008 %D 2008 %B 2008 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2008-06-23 - 2008-06-28 %C Anchorage, AK, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 1 - 8 %I IEEE Computer Society %@ 978-1-4244-2242-5
Bokeloh, M., Berner, A., Wand, M., Seidel, H.-P., and Schilling, A. 2008. Slippage Features. Wilhelm-Schickard-Institut / Universität Tübingen, Tübingen.
Export
BibTeX
@techreport{Bokeloh2008,
  TITLE       = {Slippage Features},
  AUTHOR      = {Bokeloh, Martin and Berner, Alexander and Wand, Michael and Seidel, Hans-Peter and Schilling, Andreas},
  LANGUAGE    = {eng},
  ISSN        = {0946-3852},
  URL         = {urn:nbn:de:bsz:21-opus-33880},
  NUMBER      = {WSI-2008-03},
  INSTITUTION = {Wilhelm-Schickard-Institut / Universit{\"a}t T{\"u}bingen},
  ADDRESS     = {T{\"u}bingen},
  YEAR        = {2008},
  DATE        = {2008},
  TYPE        = {WSI},
  VOLUME      = {2008-03},
}
Endnote
%0 Report %A Bokeloh, Martin %A Berner, Alexander %A Wand, Michael %A Seidel, Hans-Peter %A Schilling, Andreas %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Slippage Features : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D3FC-F %U urn:nbn:de:bsz:21-opus-33880 %Y Wilhelm-Schickard-Institut / Universit&#228;t T&#252;bingen %C T&#252;bingen %D 2008 %P 17 p. %B WSI %N 2008-03 %@ false %U http://nbn-resolving.de/urn:nbn:de:bsz:21-opus-33880
Berner, A., Bokeloh, M., Wand, M., Schilling, A., and Seidel, H.-P. 2008. A Graph-Based Approach to Symmetry Detection. VG-PBG08: Eurographics/IEEE VGTC on Volume and Point-Based Graphics, Eurographics Association.
Export
BibTeX
@inproceedings{Berner-et-al_VG-PBG08,
  TITLE     = {A Graph-Based Approach to Symmetry Detection},
  AUTHOR    = {Berner, Alexander and Bokeloh, Martin and Wand, Michael and Schilling, Andreas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-12-5},
  DOI       = {10.2312/VG/VG-PBG08/001-008},
  LOCALID   = {Local-ID: C125756E0038A185-5D3E4D89CB91674FC125754A0048B726-Berner2008},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {VG-PBG08: Eurographics/IEEE VGTC on Volume and Point-Based Graphics},
  EDITOR    = {Hege, Hans-Christian and Laidlaw, David and Pascucci, Valerio and Ynnerman, Anders and Botsch, Mario and Pajarola, Renato and Staadt, Oliver and Zwicker, Matthias},
  PAGES     = {1--8},
  ADDRESS   = {Los Angeles, CA},
}
Endnote
%0 Conference Proceedings %A Berner, Alexander %A Bokeloh, Martin %A Wand, Michael %A Schilling, Andreas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Graph-Based Approach to Symmetry Detection : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1ACD-2 %F EDOC: 427997 %F OTHER: Local-ID: C125756E0038A185-5D3E4D89CB91674FC125754A0048B726-Berner2008 %R 10.2312/VG/VG-PBG08/001-008 %D 2008 %B VG-PBG08: Eurographics/IEEE VGTC on Volume and Point-Based Graphics %Z date of event: 2008-08-10 - 2008-08-11 %C Los Angeles, CA %B VG-PBG08: Eurographics/IEEE VGTC on Volume and Point-Based Graphics %E Hege, Hans-Christian; Laidlaw, David; Pascucci, Valerio; Ynnerman, Anders; Botsch, Mario; Pajarola, Renato; Staadt, Oliver; Zwicker, Matthias %P 1 - 8 %I Eurographics Association %@ 978-3-905674-12-5
Baak, A., Müller, M., and Seidel, H.-P. 2008. An Efficient Algorithm for Keyframe-based Motion Retrieval in the Presence of Temporal Deformations. MIR ’08: Proceedings of the 1st ACM International Conference on Multimedia Information Retrieval, ACM.
Abstract
In the last years, various algorithms have been proposed for automatic classification and retrieval of motion capture data. Here, one main difficulty is due to the fact that similar types of motions may exhibit significant spatial as well as temporal variations. To cope with such variations, previous algorithms often rely on warping and alignment techniques that are computationally time and cost intensive. In this paper, we present a novel keyframe-based algorithm that significantly speeds up the retrieval process and drastically reduces memory requirements. In contrast to previous index-based strategies, our recursive algorithm can cope with temporal variations. In particular, the degree of admissible deformation tolerance between the queried keyframes can be controlled by an explicit stiffness parameter. While our algorithm works for general multimedia data, we concentrate on demonstrating the practicability of our concept by means of the motion retrieval scenario. Our experiments show that one can typically cut down the search space from several hours to a couple of minutes of motion capture data within a fraction of a second.
Export
BibTeX
@inproceedings{Baak-et-al_MIR08,
  TITLE     = {An Efficient Algorithm for Keyframe-based Motion Retrieval in the Presence of Temporal Deformations},
  AUTHOR    = {Baak, Andreas and M{\"u}ller, Meinard and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-60558-312-9},
  URL       = {http://doi.acm.org/10.1145/1460096.1460169},
  DOI       = {10.1145/1460096.1460169},
  LOCALID   = {Local-ID: C125756E0038A185-C58B63122961E157C125753E003273BF-BaakMS08_KeyframeMotion_ACM-MIR},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {In the last years, various algorithms have been proposed for automatic classification and retrieval of motion capture data. Here, one main difficulty is due to the fact that similar types of motions may exhibit significant spatial as well as temporal variations. To cope with such variations, previous algorithms often rely on warping and alignment techniques that are computationally time and cost intensive. In this paper, we present a novel keyframe-based algorithm that significantly speeds up the retrieval process and drastically reduces memory requirements. In contrast to previous index-based strategies, our recursive algorithm can cope with temporal variations. In particular, the degree of admissible deformation tolerance between the queried keyframes can be controlled by an explicit stiffness parameter. While our algorithm works for general multimedia data, we concentrate on demonstrating the practicability of our concept by means of the motion retrieval scenario. Our experiments show that one can typically cut down the search space from several hours to a couple of minutes of motion capture data within a fraction of a second.},
  BOOKTITLE = {MIR '08: Proceedings of the 1st ACM International Conference on Multimedia Information Retrieval},
  PAGES     = {451--458},
  ADDRESS   = {Vancouver, British Columbia, Canada},
}
Endnote
%0 Conference Proceedings %A Baak, Andreas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Algorithm for Keyframe-based Motion Retrieval in the Presence of Temporal Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AE2-0 %F EDOC: 427984 %R 10.1145/1460096.1460169 %U http://doi.acm.org/10.1145/1460096.1460169 %F OTHER: Local-ID: C125756E0038A185-C58B63122961E157C125753E003273BF-BaakMS08_KeyframeMotion_ACM-MIR %D 2008 %B 1st ACM International Conference on Multimedia Information Retrieval %Z date of event: 2008-10-30 - 2008-10-31 %C Vancouver, British Columbia, Canada %X In the last years, various algorithms have been proposed for automatic<br>classification and retrieval of motion capture data. Here, one main<br>difficulty is due to the fact that similar types of motions may exhibit<br>significant spatial as well as temporal variations. To cope with such<br>variations, previous algorithms often rely on warping and alignment<br>techniques that are computationally time and cost intensive.<br>In this paper, we present a novel keyframe-based algorithm that<br>significantly speeds up the retrieval process and drastically<br>reduces memory requirements. <br>In contrast to previous index-based strategies, our recursive algorithm <br>can cope with temporal variations. In particular, the degree of <br>admissible deformation tolerance between the queried keyframes<br>can be controlled by an explicit stiffness parameter.<br>While our algorithm works for general multimedia data,<br>we concentrate on demonstrating the practicability of <br>our concept by means of the motion retrieval scenario. 
<br>Our experiments show that one can typically cut down the<br>search space from several hours to a couple of minutes of<br>motion capture data within a fraction of a second. %B MIR '08: Proceedings of the 1st ACM International Conference on Multimedia Information Retrieval %P 451 - 458 %I ACM %@ 978-1-60558-312-9
Aydin, T.O., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2008a. Dynamic Range Independent Image Quality Assessment. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2008), ACM.
Abstract
The diversity of display technologies and introduction of high dynamic range imagery introduces the necessity of comparing images of radically different dynamic ranges. Current quality assessment metrics are not suitable for this task, as they assume that both reference and test images have the same dynamic range. Image fidelity measures employed by a majority of current metrics, based on the difference of pixel intensity or contrast values between test and reference images, result in meaningless predictions if this assumption does not hold. We present a novel image quality metric capable of operating on an image pair where both images have arbitrary dynamic ranges. Our metric utilizes a model of the human visual system, and its central idea is a new definition of visible distortion based on the detection and classification of visible changes in the image structure. Our metric is carefully calibrated and its performance is validated through perceptual experiments. We demonstrate possible applications of our metric to the evaluation of direct and inverse tone mapping operators as well as the analysis of the image appearance on displays with various characteristics.
Export
BibTeX
@inproceedings{Aydin2008s,
  TITLE     = {Dynamic Range Independent Image Quality Assessment},
  AUTHOR    = {Aydin, Tunc Ozan and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-0112-1},
  DOI       = {10.1145/1360612.1360668},
  PUBLISHER = {ACM},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {The diversity of display technologies and introduction of high dynamic range imagery introduces the necessity of comparing images of radically different dynamic ranges. Current quality assessment metrics are not suitable for this task, as they assume that both reference and test images have the same dynamic range. Image fidelity measures employed by a majority of current metrics, based on the difference of pixel intensity or contrast values between test and reference images, result in meaningless predictions if this assumption does not hold. We present a novel image quality metric capable of operating on an image pair where both images have arbitrary dynamic ranges. Our metric utilizes a model of the human visual system, and its central idea is a new definition of visible distortion based on the detection and classification of visible changes in the image structure. Our metric is carefully calibrated and its performance is validated through perceptual experiments. We demonstrate possible applications of our metric to the evaluation of direct and inverse tone mapping operators as well as the analysis of the image appearance on displays with various characteristics.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008},
  EDITOR    = {Turk, Greg},
  PAGES     = {69:1--69:10},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {27},
  ISSUE     = {3},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Aydin, Tunc Ozan %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Range Independent Image Quality Assessment : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B77-7 %R 10.1145/1360612.1360668 %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, CA, USA %X The diversity of display technologies and introduction of high dynamic range <br>imagery introduces the necessity of comparing images of radically different <br>dynamic ranges. Current quality assessment metrics are not suitable for this <br>task, as they assume that both reference and test images have the same dynamic <br>range. Image fidelity measures employed by a majority of current metrics, based <br>on the difference of pixel intensity or contrast values between test and <br>reference images, result in meaningless predictions if this assumption does not <br>hold. We present a novel image quality metric capable of operating on an image <br>pair where both images have arbitrary dynamic ranges. Our metric utilizes a <br>model of the human visual system, and its central idea is a new definition of <br>visible distortion based on the detection and classification of visible changes <br>in the image structure. Our metric is carefully calibrated and its performance <br>is validated through perceptual experiments. We demonstrate possible <br>applications of our metric to the evaluation of direct and inverse tone mapping <br>operators as well as the analysis of the image appearance on displays with <br>various characteristics. 
%B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P 69:1 - 69:10 %I ACM %@ 978-1-4503-0112-1 %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
Aydin, T.O., Mantiuk, R., and Seidel, H.-P. 2008b. Extending Quality Metrics to Full Luminance Range Images. Human Vision and Electronic Imaging XIII, SPIE-IS&T.
Abstract
Many quality metrics take as input gamma corrected images and assume that pixel code values are scaled perceptually uniform. Although this is a valid assumption for darker displays operating in the luminance range typical for CRT displays (from 0.1 to 80 $cd/m^2$), it is no longer true for much brighter LCD displays (typically up to 500 $cd/m^2$), plasma displays (small regions up to 1000 $cd/m^2$) and HDR displays (up to 3000 $cd/m^2$). The distortions that are barely visible on dark displays become clearly noticeable when shown on much brighter displays. To estimate quality of images shown on bright displays, we propose a straightforward extension to the popular quality metrics, such as PSNR and SSIM, that makes them capable of handling all luminance levels visible to the human eye without altering their results for typical CRT display luminance levels. Such extended quality metrics can be used to estimate quality of high dynamic range (HDR) images as well as account for display brightness.
Export
BibTeX
@inproceedings{Aydin-et-al_HVEI08,
  TITLE     = {Extending Quality Metrics to Full Luminance Range Images},
  AUTHOR    = {Aydin, Tunc Ozan and Mantiuk, Rafa{\l} and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-8194-6978-6},
  URL       = {http://dx.doi.org/10.1117/12.765095},
  DOI       = {10.1117/12.765095},
  LOCALID   = {Local-ID: C125756E0038A185-7860639033CA4E41C12573F3005486AA-aydin_2008_spie},
  PUBLISHER = {SPIE-IS\&T},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Many quality metrics take as input gamma corrected images and assume that pixel code values are scaled perceptually uniform. Although this is a valid assumption for darker displays operating in the luminance range typical for CRT displays (from 0.1 to 80 $cd/m^2$), it is no longer true for much brighter LCD displays (typically up to 500 $cd/m^2$), plasma displays (small regions up to 1000 $cd/m^2$) and HDR displays (up to 3000 $cd/m^2$). The distortions that are barely visible on dark displays become clearly noticeable when shown on much brighter displays. To estimate quality of images shown on bright displays, we propose a straightforward extension to the popular quality metrics, such as PSNR and SSIM, that makes them capable of handling all luminance levels visible to the human eye without altering their results for typical CRT display luminance levels. Such extended quality metrics can be used to estimate quality of high dynamic range (HDR) images as well as account for display brightness.},
  BOOKTITLE = {Human Vision and Electronic Imaging XIII},
  EDITOR    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.},
  PAGES     = {6806B-1--6806B-10},
  SERIES    = {Proceedings of SPIE},
  VOLUME    = {6806},
  ADDRESS   = {San Jose, USA},
}
Endnote
%0 Conference Proceedings %A Aydin, Tunc Ozan %A Mantiuk, Rafa&#322; %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Extending Quality Metrics to Full Luminance Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BA3-5 %F EDOC: 427983 %R 10.1117/12.765095 %U http://dx.doi.org/10.1117/12.765095 %F OTHER: Local-ID: C125756E0038A185-7860639033CA4E41C12573F3005486AA-aydin_2008_spie %D 2008 %B Human Vision and Electronic Imaging XIII %Z date of event: 2008-01-28 - 2008-01-31 %C San Jose, USA %X Many quality metrics take as input gamma corrected images and<br> assume that pixel code values are scaled perceptually uniform. <br> Although this is a valid assumption for darker displays<br> operating in the luminance range typical for CRT displays (from 0.1<br> to 80 $cd/m^2$), it is no longer true for much brighter LCD displays<br> (typically up to 500 $cd/m^2$), plasma displays (small regions up to<br> 1000 $cd/m^2$) and HDR displays (up to 3000 $cd/m^2$). The<br> distortions that are barely visible on dark displays become clearly<br> noticeable when shown on much brighter displays. To estimate quality<br> of images shown on bright displays, we propose a straightforward<br> extension to the popular quality metrics, such as PSNR and SSIM,<br> that makes them capable of handling all luminance levels visible to<br> the human eye without altering their results for typical CRT display<br> luminance levels. Such extended quality metrics can be used to<br> estimate quality of high dynamic range (HDR) images as well as<br> account for display brightness. %B Human Vision and Electronic Imaging XIII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 6806B-1 - 6806B-10 %I SPIE-IS&T %@ 978-0-8194-6978-6 %B Proceedings of SPIE %N 6806
Atcheson, B., Ihrke, I., Heidrich, W., et al. 2008. Time-resolved 3D Capture of Non-stationary Gas Flows. ACM Transactions on Graphics (Proc. SIGGRAPH Asia 2008), ACM.
Abstract
Fluid simulation is one of the most active research areas in computer graphics. However, it remains difficult to obtain measurements of real fluid flows for validation of the simulated data. In this paper, we take a step in the direction of capturing flow data for such purposes. Specifically, we present the first time-resolved Schlieren tomography system for capturing full 3D, non-stationary gas flows on a dense volumetric grid. Schlieren tomography uses 2D ray deflection measurements to reconstruct a time-varying grid of 3D refractive index values, which directly correspond to physical properties of the flow. We derive a new solution for this reconstruction problem that lends itself to efficient algorithms that robustly work with relatively small numbers of cameras. Our physical system is easy to set up, and consists of an array of relatively low cost rolling-shutter camcorders that are synchronized with a new approach. We demonstrate our method with real measurements, and analyze precision with synthetic data for which ground truth information is available.
Export
BibTeX
@inproceedings{Atcheson-et-al_SIGGRAPH08, TITLE = {Time-resolved {3D} Capture of Non-stationary Gas Flows}, AUTHOR = {Atcheson, Bradley and Ihrke, Ivo and Heidrich, Wolfgang and Tevs, Art and Bradley, Derek and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-1831-0}, URL = {http://www.cs.ubc.ca/labs/imager/tr/2008/GasCapture/gascapture.pdf}, DOI = {10.1145/1457515.1409085}, LOCALID = {Local-ID: C125756E0038A185-42313017B538F8DDC125755400566273-Atcheson:2008}, PUBLISHER = {ACM}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {Fluid simulation is one of the most active research areas in computer graphics. <br>However, it remains difficult to obtain measurements of real fluid flows for <br>validation of the simulated data.<br><br>In this paper, we take a step in the direction of capturing flow data for such <br>purposes. Specifically, we present the first time-resolved Schlieren tomography <br>system for capturing full 3D, non-stationary gas flows on a dense volumetric <br>grid. Schlieren tomography uses 2D ray deflection measurements to reconstruct a <br>time-varying grid of 3D refractive index values, which directly correspond to <br>physical properties of the flow. We derive a new solution for this <br>reconstruction problem that lends itself to efficient algorithms that robustly <br>work with relatively small numbers of cameras. Our physical system is easy to <br>set up, and consists of an array of relatively low cost rolling-shutter <br>camcorders that are synchronized with a new approach. We demonstrate our method <br>with real measurements, and analyze precision with synthetic data for which <br>ground truth information is available.}, BOOKTITLE = {SIGGRAPH Asia '08: ACM SIGGRAPH Asia 2008 papers}, EDITOR = {Hart, John C.}, PAGES = {132.1--132.9}, JOURNAL = {ACM Transactions on Graphics (Proc. SIGGRAPH Asia)}, VOLUME = {27}, ISSUE = {5}, ADDRESS = {Singapore}, }
Endnote
%0 Conference Proceedings %A Atcheson, Bradley %A Ihrke, Ivo %A Heidrich, Wolfgang %A Tevs, Art %A Bradley, Derek %A Magnor, Marcus %A Seidel, Hans-Peter %+ External Organizations Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Time-resolved 3D Capture of Non-stationary Gas Flows : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D37-B %F EDOC: 427981 %R 10.1145/1457515.1409085 %U http://www.cs.ubc.ca/labs/imager/tr/2008/GasCapture/gascapture.pdf %F OTHER: Local-ID: C125756E0038A185-42313017B538F8DDC125755400566273-Atcheson:2008 %D 2008 %B ACM SIGGRAPH Asia 2008 %Z date of event: 2008-12-10 - 2008-12-13 %C Singapore %X Fluid simulation is one of the most active research areas in computer graphics. <br>However, it remains difficult to obtain measurements of real fluid flows for <br>validation of the simulated data.<br><br>In this paper, we take a step in the direction of capturing flow data for such <br>purposes. Specifically, we present the first time-resolved Schlieren tomography <br>system for capturing full 3D, non-stationary gas flows on a dense volumetric <br>grid. Schlieren tomography uses 2D ray deflection measurements to reconstruct a <br>time-varying grid of 3D refractive index values, which directly correspond to <br>physical properties of the flow. We derive a new solution for this <br>reconstruction problem that lends itself to efficient algorithms that robustly <br>work with relatively small numbers of cameras. 
Our physical system is easy to <br>set up, and consists of an array of relatively low cost rolling-shutter <br>camcorders that are synchronized with a new approach. We demonstrate our method <br>with real measurements, and analyze precision with synthetic data for which <br>ground truth information is available. %B SIGGRAPH Asia '08: ACM SIGGRAPH Asia 2008 papers %E Hart, John C. %P 132.1 - 132.9 %I ACM %@ 978-1-4503-1831-0 %J ACM Transactions on Graphics %V 27 %N 5 %I Association for Computing Machinery %@ false
Annen, T., Dong, Z., Mertens, T., Bekaert, P., Seidel, H.-P., and Kautz, J. 2008a. Real-time, All-frequency Shadows in Dynamic Scenes. ACM Transactions on Graphics (Proc. SIGGRAPH 2008), ACM.
Export
BibTeX
@inproceedings{Annen-et-al_SIGGRAPH08, TITLE = {Real-time, All-frequency Shadows in Dynamic Scenes}, AUTHOR = {Annen, Thomas and Dong, Zhao and Mertens, Tom and Bekaert, Philippe and Seidel, Hans-Peter and Kautz, Jan}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-0112-1}, URL = {http://www.mpi-inf.mpg.de/~dong/download/SIG08_CSSM.pdf}, DOI = {10.1145/1360612.1360633}, LOCALID = {Local-ID: C125756E0038A185-64CC19EC463F8883C12574640040C82B-ADM:SIGGRAPH:CSSM:2008}, PUBLISHER = {ACM}, YEAR = {2008}, DATE = {2008}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008}, EDITOR = {Turk, Greg}, PAGES = {34.1--34.8}, JOURNAL = {ACM Transactions on Graphics (Proc. SIGGRAPH)}, VOLUME = {27}, ISSUE = {3}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Dong, Zhao %A Mertens, Tom %A Bekaert, Philippe %A Seidel, Hans-Peter %A Kautz, Jan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time, All-frequency Shadows in Dynamic Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CC3-A %F EDOC: 427977 %R 10.1145/1360612.1360633 %U http://www.mpi-inf.mpg.de/~dong/download/SIG08_CSSM.pdf %F OTHER: Local-ID: C125756E0038A185-64CC19EC463F8883C12574640040C82B-ADM:SIGGRAPH:CSSM:2008 %D 2008 %B ACM SIGGRAPH 2008 %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, CA, USA %B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P 34.1 - 34.8 %I ACM %@ 978-1-4503-0112-1 %J ACM Transactions on Graphics %V 27 %N 3 %I Association for Computing Machinery %@ false
Annen, T., Mertens, T., Seidel, H.-P., Flerackers, E., and Kautz, J. 2008b. Exponential Shadow Maps. Proceedings of the Graphics Interface 2008 (GI 2008), Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Annen-et-al_GI08,
  TITLE     = {Exponential Shadow Maps},
  AUTHOR    = {Annen, Thomas and Mertens, Tom and Seidel, Hans-Peter and Flerackers, Eddy and Kautz, Jan},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-423-0},
  LOCALID   = {Local-ID: C125756E0038A185-D2E6A2288B5D5353C12573FB0048030B-Annen:GI:ESM:2008},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of the Graphics Interface 2008 (GI 2008)},
  EDITOR    = {Shaw, Chris and Bartram, Lyn},
  PAGES     = {155--161},
  SERIES    = {ACM International Conference Proceeding Series},
  ADDRESS   = {Windsor, Ontario, Canada},
}
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Mertens, Tom %A Seidel, Hans-Peter %A Flerackers, Eddy %A Kautz, Jan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Exponential Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BA1-9 %F EDOC: 427978 %F OTHER: Local-ID: C125756E0038A185-D2E6A2288B5D5353C12573FB0048030B-Annen:GI:ESM:2008 %D 2008 %B Graphics Interface 2008 %Z date of event: 2008-05-28 - 2008-05-30 %C Windsor, Ontario, Canada %B Proceedings of the Graphics Interface 2008 %E Shaw, Chris; Bartram, Lyn %P 155 - 161 %I Canadian Information Processing Society %@ 978-1-56881-423-0 %B ACM International Conference Proceeding Series
Annen, T., Theisel, H., Rössl, C., Ziegler, G., and Seidel, H.-P. 2008c. Vector Field Contours. Proceedings of the Graphics Interface 2008, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Theisel-et-al_GI08,
  TITLE     = {Vector Field Contours},
  AUTHOR    = {Annen, Thomas and Theisel, Holger and R{\"o}ssl, Christian and Ziegler, Gernot and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-423-0},
  LOCALID   = {Local-ID: C125756E0038A185-F4C701CA15586686C12573FB0049272A-Annen:GI:VFC:2008},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2008},
  DATE      = {2008},
  BOOKTITLE = {Proceedings of the Graphics Interface 2008},
  EDITOR    = {Shaw, Chris and Bartram, Lyn},
  PAGES     = {97--105},
  SERIES    = {ACM International Conference Proceeding Series},
  ADDRESS   = {Windsor, Ontario, Canada},
}
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Theisel, Holger %A R&#246;ssl, Christian %A Ziegler, Gernot %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Vector Field Contours : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D56-5 %F EDOC: 427979 %F OTHER: Local-ID: C125756E0038A185-F4C701CA15586686C12573FB0049272A-Annen:GI:VFC:2008 %D 2008 %B Graphics Interface 2008 %Z date of event: 2008-05-28 - 2008-05-30 %C Windsor, Ontario, Canada %B Proceedings of the Graphics Interface 2008 %E Shaw, Chris; Bartram, Lyn %P 97 - 105 %I Canadian Information Processing Society %@ 978-1-56881-423-0 %B ACM International Conference Proceeding Series
Ajdin, B., Hullin, M.B., Fuchs, C., Seidel, H.-P., and Lensch, H.P.A. 2008. Demosaicing by Smoothing along 1D Features. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008), IEEE Computer Society.
Abstract
Most digital cameras capture color pictures in the form of an image mosaic, <br>recording only one color channel at each pixel position. Therefore, an <br>interpolation algorithm needs to be applied to reconstruct the missing color <br>information. In this paper we present a novel Bayer pattern demosaicing <br>approach, employing stochastic global optimization performed on a pixel <br>neighborhood. We are minimizing a newly developed cost function that increases <br>smoothness along one-dimensional image features. While previous algorithms have <br>been developed focusing on LDR images only, our optimization scheme and the <br>underlying cost function are designed to handle both LDR and HDR images, <br>creating less demosaicing artifacts, compared to previous approaches.
Export
BibTeX
@inproceedings{Ajdin-et-al_CVPR08,
  TITLE     = {Demosaicing by Smoothing along {1D} Features},
  AUTHOR    = {Ajdin, Boris and Hullin, Matthias B. and Fuchs, Christian and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-2242-5},
  URL       = {http://www.mpi-inf.mpg.de/~bajdin/CVPR_final_ver1.pdf},
  DOI       = {10.1109/CVPR.2008.4587653},
  LOCALID   = {Local-ID: C125756E0038A185-E686CD2042F87A2AC125755500477C99-Ajdin2008},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2008},
  DATE      = {2008},
  ABSTRACT  = {Most digital cameras capture color pictures in the form of an image mosaic, <br>recording only one color channel at each pixel position. Therefore, an <br>interpolation algorithm needs to be applied to reconstruct the missing color <br>information. In this paper we present a novel Bayer pattern demosaicing <br>approach, employing stochastic global optimization performed on a pixel <br>neighborhood. We are minimizing a newly developed cost function that increases <br>smoothness along one-dimensional image features. While previous algorithms have <br>been developed focusing on LDR images only, our optimization scheme and the <br>underlying cost function are designed to handle both LDR and HDR images, <br>creating less demosaicing artifacts, compared to previous approaches.},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008)},
  PAGES     = {2423--2430},
  ADDRESS   = {Anchorage, AK, USA},
}
Endnote
%0 Conference Proceedings %A Ajdin, Boris %A Hullin, Matthias B. %A Fuchs, Christian %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Demosaicing by Smoothing along 1D Features : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B65-F %F EDOC: 427963 %R 10.1109/CVPR.2008.4587653 %U http://www.mpi-inf.mpg.de/~bajdin/CVPR_final_ver1.pdf %F OTHER: Local-ID: C125756E0038A185-E686CD2042F87A2AC125755500477C99-Ajdin2008 %D 2008 %B 2008 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2008-06-23 - 2008-06-28 %C Anchorage, AK, USA %X Most digital cameras capture color pictures in the form of an image mosaic, <br>recording only one color channel at each pixel position. Therefore, an <br>interpolation algorithm needs to be applied to reconstruct the missing color <br>information. In this paper we present a novel Bayer pattern demosaicing <br>approach, employing stochastic global optimization performed on a pixel <br>neighborhood. We are minimizing a newly developed cost function that increases <br>smoothness along one-dimensional image features. While previous algorithms have <br>been developed focusing on LDR images only, our optimization scheme and the <br>underlying cost function are designed to handle both LDR and HDR images, <br>creating less demosaicing artifacts, compared to previous approaches. %B IEEE Conference on Computer Vision and Pattern Recognition %P 2423 - 2430 %I IEEE Computer Society %@ 978-1-4244-2242-5
Ahmed, N., Theobalt, C., Rössl, C., Thrun, S., and Seidel, H.-P. 2008a. Dense Correspondence Finding for Parametrization-free Animation Reconstruction from Video. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008), IEEE Computer Society.
Abstract
We present a dense 3D correspondence finding method<br>that enables spatio-temporally coherent reconstruction of<br>surface animations from multi-view video data. Given as input<br>a sequence of shape-from-silhouette volumes of a moving<br>subject that were reconstructed for each time frame individually,<br>our method establishes dense surface correspondences<br>between subsequent shapes independently of surface<br>discretization. This is achieved in two steps: first, we obtain<br>sparse correspondences from robust optical features<br>between adjacent frames. Second, we generate dense correspondences<br>which serve as map between respective surfaces.<br>By applying this procedure subsequently to all pairs<br>of time steps we can trivially align one shape with all others.<br>Thus, the original input can be reconstructed as a sequence<br>of meshes with constant connectivity and small tangential<br>distortion. We exemplify the performance and accuracy of<br>our method using several synthetic and captured real-world<br>sequences.
Export
BibTeX
@inproceedings{Ahmed-et-al_CVPR08a, TITLE = {Dense Correspondence Finding for Parametrization-free Animation Reconstruction from Video}, AUTHOR = {Ahmed, Naveed and Theobalt, Christian and R{\"o}ssl, Christian and Thrun, Sebastian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4244-2242-5}, DOI = {10.1109/CVPR.2008.4587758}, LOCALID = {Local-ID: C125756E0038A185-052B4E5D12A0B04EC12574190040B3E5-NaveedCVPR08a}, PUBLISHER = {IEEE Computer Society}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {We present a dense 3D correspondence finding method<br>that enables spatio-temporally coherent reconstruction of<br>surface animations from multi-view video data. Given as input<br>a sequence of shape-from-silhouette volumes of a moving<br>subject that were reconstructed for each time frame individually,<br>our method establishes dense surface correspondences<br>between subsequent shapes independently of surface<br>discretization. This is achieved in two steps: first, we obtain<br>sparse correspondences from robust optical features<br>between adjacent frames. Second, we generate dense correspondences<br>which serve as map between respective surfaces.<br>By applying this procedure subsequently to all pairs<br>of time steps we can trivially align one shape with all others.<br>Thus, the original input can be reconstructed as a sequence<br>of meshes with constant connectivity and small tangential<br>distortion. We exemplify the performance and accuracy of<br>our method using several synthetic and captured real-world<br>sequences.}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008)}, PAGES = {1--8}, ADDRESS = {Anchorage, AK, USA}, }
Endnote
%0 Conference Proceedings %A Ahmed, Naveed %A Theobalt, Christian %A R&#246;ssl, Christian %A Thrun, Sebastian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Dense Correspondence Finding for Parametrization-free Animation Reconstruction from Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B67-B %F EDOC: 427960 %R 10.1109/CVPR.2008.4587758 %U http://dx.doi.org/10.1109/CVPR.2008.4587758 %F OTHER: Local-ID: C125756E0038A185-052B4E5D12A0B04EC12574190040B3E5-NaveedCVPR08a %D 2008 %B 2008 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2008-06-23 - 2008-06-28 %C Anchorage, AK, USA %X We present a dense 3D correspondence finding method<br>that enables spatio-temporally coherent reconstruction of<br>surface animations from multi-view video data. Given as input<br>a sequence of shape-from-silhouette volumes of a moving<br>subject that were reconstructed for each time frame individually,<br>our method establishes dense surface correspondences<br>between subsequent shapes independently of surface<br>discretization. This is achieved in two steps: first, we obtain<br>sparse correspondences from robust optical features<br>between adjacent frames. Second, we generate dense correspondences<br>which serve as map between respective surfaces.<br>By applying this procedure subsequently to all pairs<br>of time steps we can trivially align one shape with all others.<br>Thus, the original input can be reconstructed as a sequence<br>of meshes with constant connectivity and small tangential<br>distortion. We exemplify the performance and accuracy of<br>our method using several synthetic and captured real-world<br>sequences. 
%B IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008) %P 1 - 8 %I IEEE Computer Society %@ 978-1-4244-2242-5
Ahmed, N., Theobalt, C., Dobrev, P., Seidel, H.-P., and Thrun, S. 2008b. Robust Fusion of Dynamic Shape and Normal Capture for High-quality Reconstruction of Time-varying Geometry. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008), IEEE Computer Society.
Abstract
We present a new passive approach to capture time-varying scene geometry in <br>large acquisition volumes from multi-view video. It can be applied to <br>reconstruct complete moving models of human actors that feature even slightest <br>dynamic geometry detail, such as wrinkles and folds in clothing, and that can <br>be viewed from 360 degrees. Starting from multi-view video streams recorded <br>under calibrated lighting, we first perform marker-less human motion capture <br>based on a smooth template with no high-frequency surface detail. Subsequently, <br>surface reflectance and time-varying normal fields are estimated based on the <br>coarse template shape. The main contribution of this work is a new statistical <br>approach to solve the non-trivial problem of transforming the captured normal <br>field that is defined over the smooth non-planar 3D template into true 3D <br>displacements. Our spatio-temporal reconstruction method outputs displaced <br>geometry that is accurate at each time step of video and temporally smooth, <br>even if the input data are affected by noise.
Export
BibTeX
@inproceedings{Ahmed-et-al_CVPR08b, TITLE = {Robust Fusion of Dynamic Shape and Normal Capture for High-quality Reconstruction of Time-varying Geometry}, AUTHOR = {Ahmed, Naveed and Theobalt, Christian and Dobrev, Petar and Seidel, Hans-Peter and Thrun, Sebastian}, LANGUAGE = {eng}, ISBN = {978-1-4244-2242-5}, DOI = {10.1109/CVPR.2008.4587696}, LOCALID = {Local-ID: C125756E0038A185-2F094A743E8137DAC125755B007C439C-NaveedCVPR08b}, PUBLISHER = {IEEE Computer Society}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {We present a new passive approach to capture time-varying scene geometry in <br>large acquisition volumes from multi-view video. It can be applied to <br>reconstruct complete moving models of human actors that feature even slightest <br>dynamic geometry detail, such as wrinkles and folds in clothing, and that can <br>be viewed from 360 degrees. Starting from multi-view video streams recorded <br>under calibrated lighting, we first perform marker-less human motion capture <br>based on a smooth template with no high-frequency surface detail. Subsequently, <br>surface reflectance and time-varying normal fields are estimated based on the <br>coarse template shape. The main contribution of this work is a new statistical <br>approach to solve the non-trivial problem of transforming the captured normal <br>field that is defined over the smooth non-planar 3D template into true 3D <br>displacements. Our spatio-temporal reconstruction method outputs displaced <br>geometry that is accurate at each time step of video and temporally smooth, <br>even if the input data are affected by noise.}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2008)}, PAGES = {1--8}, ADDRESS = {Anchorage, AK, USA}, }
Endnote
%0 Conference Proceedings %A Ahmed, Naveed %A Theobalt, Christian %A Dobrev, Petar %A Seidel, Hans-Peter %A Thrun, Sebastian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Robust Fusion of Dynamic Shape and Normal Capture for High-quality Reconstruction of Time-varying Geometry : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CDB-5 %F EDOC: 427959 %R 10.1109/CVPR.2008.4587696 %U http://dx.doi.org/10.1109/CVPR.2008.4587696 %F OTHER: Local-ID: C125756E0038A185-2F094A743E8137DAC125755B007C439C-NaveedCVPR08b %D 2008 %B 2008 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2008-06-23 - 2008-06-28 %C Anchorage, AK, USA %X We present a new passive approach to capture time-varying scene geometry in <br>large acquisition volumes from multi-view video. It can be applied to <br>reconstruct complete moving models of human actors that feature even slightest <br>dynamic geometry detail, such as wrinkles and folds in clothing, and that can <br>be viewed from 360 degrees. Starting from multi-view video streams recorded <br>under calibrated lighting, we first perform marker-less human motion capture <br>based on a smooth template with no high-frequency surface detail. Subsequently, <br>surface reflectance and time-varying normal fields are estimated based on the <br>coarse template shape. The main contribution of this work is a new statistical <br>approach to solve the non-trivial problem of transforming the captured normal <br>field that is defined over the smooth non-planar 3D template into true 3D <br>displacements. Our spatio-temporal reconstruction method outputs displaced <br>geometry that is accurate at each time step of video and temporally smooth, <br>even if the input data are affected by noise. 
%B IEEE Conference on Computer Vision and Pattern Recognition %P 1 - 8 %I IEEE Computer Society %@ 978-1-4244-2242-5
Adams, B., Ovsjanikov, M., Wand, M., Seidel, H.-P., and Guibas, L. 2008. Meshless Modeling of Deformable Shapes and their Motion. SCA 08: Eurographics/SIGGRAPH Symposium on Computer Animation, Eurographics Association.
Export
BibTeX
@inproceedings{Adams-et-al_SCA08, TITLE = {Meshless Modeling of Deformable Shapes and their Motion}, AUTHOR = {Adams, Bart and Ovsjanikov, Maksim and Wand, Michael and Seidel, Hans-Peter and Guibas, Leonidas}, LANGUAGE = {eng}, ISBN = {978-3-905674-10-1}, DOI = {10.2312/SCA/SCA08/077-086}, LOCALID = {Local-ID: C125756E0038A185-7772BC02CCEBA474C125754A00480F60-Adams2008}, PUBLISHER = {Eurographics Association}, YEAR = {2008}, DATE = {2008}, BOOKTITLE = {SCA 08: Eurographics/SIGGRAPH Symposium on Computer Animation}, EDITOR = {Fellner, Dieter and Spencer, Stephen}, PAGES = {77--86}, ADDRESS = {Dublin, Ireland}, }
Endnote
%0 Conference Proceedings %A Adams, Bart %A Ovsjanikov, Maksim %A Wand, Michael %A Seidel, Hans-Peter %A Guibas, Leonidas %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Meshless Modeling of Deformable Shapes and their Motion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1C37-4 %F EDOC: 427958 %F OTHER: Local-ID: C125756E0038A185-7772BC02CCEBA474C125754A00480F60-Adams2008 %R 10.2312/SCA/SCA08/077-086 %D 2008 %B SCA 08: Eurographics/SIGGRAPH Symposium on Computer Animation %Z date of event: 2009-01-26 - 2009-01-26 %C Dublin, Ireland %B SCA 08: Eurographics/SIGGRAPH Symposium on Computer Animation %E Fellner, Dieter; Spencer, Stephen %P 77 - 86 %I Eurographics Association %@ 978-3-905674-10-1
2007
Ziegler, G., Theobalt, C., Ihrke, I., Magnor, M.A., Tevs, A., and Seidel, H.-P. 2007a. GPU-based Light Wavefront Simulation for Real-time Refractive Object Rendering. SIGGRAPH ’07: ACM SIGGRAPH 2007 sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/ZieglerTIMTS07,
  TITLE     = {{GPU}-based Light Wavefront Simulation for Real-time Refractive Object Rendering},
  AUTHOR    = {Ziegler, Gernot and Theobalt, Christian and Ihrke, Ivo and Magnor, Marcus A. and Tevs, Art and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-4726-6},
  DOI       = {10.1145/1278780.1278846},
  PUBLISHER = {ACM},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {SIGGRAPH '07: ACM SIGGRAPH 2007 sketches},
  EDITOR    = {Alexa, Marc and Finkelstein, Adam},
  PAGES     = {54--54},
  ADDRESS   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Ziegler, Gernot %A Theobalt, Christian %A Ihrke, Ivo %A Magnor, Marcus A. %A Tevs, Art %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T GPU-based Light Wavefront Simulation for Real-time Refractive Object Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-000F-51E7-1 %R 10.1145/1278780.1278846 %D 2007 %B International Conference on Computer Graphics and Interactive Techniques %Z date of event: 2007-08-05 - 2007-08-09 %C San Diego, CA, USA %B SIGGRAPH '07: ACM SIGGRAPH 2007 sketches %E Alexa, Marc; Finkelstein, Adam %P 54 - 54 %I ACM %@ 978-1-4503-4726-6
Ziegler, G., Dimitrov, R., Theobalt, C., and Seidel, H.-P. 2007b. Real-time Quadtree Analysis using HistoPyramids. Real-Time Image Processing 2007, SPIE.
Abstract
Region quadtrees are convenient tools for hierarchical image analysis. Like the <br>related Haar wavelets, they are simple to generate within a fixed calculation <br>time. The clustering at each resolution level requires only local data, yet <br>they deliver intuitive classification results. Although the region quadtree <br>partitioning is very rigid, it can be rapidly computed from arbitrary imagery. <br>This research article demonstrates how graphics hardware can be utilized to <br>build region quadtrees at unprecedented speeds. To achieve this, a <br>data-structure called HistoPyramid registers the number of desired image <br>features in a pyramidal 2D array. Then, this HistoPyramid is used as an <br>implicit indexing data structure through quadtree traversal, creating lists of <br>the registered image features directly in GPU memory, and virtually eliminating <br>bus transfers between CPU and GPU. With this novel concept, quadtrees can be <br>applied in real-time video processing on standard PC hardware. A multitude of <br>applications in image and video processing arises, since region quadtree <br>analysis becomes a light-weight preprocessing step for feature clustering in <br>vision tasks, motion vector analysis, PDE calculations, or data compression. In <br>a sidenote, we outline how this algorithm can be applied to 3D volume data, <br>effectively generating region octrees purely on graphics hardware.
Export
BibTeX
@inproceedings{Ziegler-et-al_RTIP07,
  TITLE     = {Real-time Quadtree Analysis using {HistoPyramids}},
  AUTHOR    = {Ziegler, Gernot and Dimitrov, Rouslan and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {9780819466099},
  DOI       = {10.1117/12.703089},
  LOCALID   = {Local-ID: C12573CC004A8E26-31C1456C0CD2DA4BC1257209007E4F1D-ZTDS2005},
  PUBLISHER = {SPIE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Region quadtrees are convenient tools for hierarchical image analysis. Like the related Haar wavelets, they are simple to generate within a fixed calculation time. The clustering at each resolution level requires only local data, yet they deliver intuitive classification results. Although the region quadtree partitioning is very rigid, it can be rapidly computed from arbitrary imagery. This research article demonstrates how graphics hardware can be utilized to build region quadtrees at unprecedented speeds. To achieve this, a data-structure called HistoPyramid registers the number of desired image features in a pyramidal 2D array. Then, this HistoPyramid is used as an implicit indexing data structure through quadtree traversal, creating lists of the registered image features directly in GPU memory, and virtually eliminating bus transfers between CPU and GPU. With this novel concept, quadtrees can be applied in real-time video processing on standard PC hardware. A multitude of applications in image and video processing arises, since region quadtree analysis becomes a light-weight preprocessing step for feature clustering in vision tasks, motion vector analysis, PDE calculations, or data compression. In a sidenote, we outline how this algorithm can be applied to 3D volume data, effectively generating region octrees purely on graphics hardware.},
  BOOKTITLE = {Real-Time Image Processing 2007},
  EDITOR    = {Kehtarnavaz, Nasser and Carlsohn, Matthias F.},
  PAGES     = {1--11},
  SERIES    = {SPIE},
  VOLUME    = {6496},
  ADDRESS   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Ziegler, Gernot %A Dimitrov, Rouslan %A Theobalt, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Quadtree Analysis using HistoPyramids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2076-6 %F EDOC: 356610 %R 10.1117/12.703089 %F OTHER: Local-ID: C12573CC004A8E26-31C1456C0CD2DA4BC1257209007E4F1D-ZTDS2005 %D 2007 %B Real-Time Image Processing 2007 %Z date of event: 2007-01-28 - 2007-02-01 %C San Jose, CA, USA %X Region quadtrees are convenient tools for hierarchical image analysis. Like the <br>related Haar wavelets, they are simple to generate within a fixed calculation <br>time. The clustering at each resolution level requires only local data, yet <br>they deliver intuitive classification results. Although the region quadtree <br>partitioning is very rigid, it can be rapidly computed from arbitrary imagery. <br>This research article demonstrates how graphics hardware can be utilized to <br>build region quadtrees at unprecedented speeds. To achieve this, a <br>data-structure called HistoPyramid registers the number of desired image <br>features in a pyramidal 2D array. Then, this HistoPyramid is used as an <br>implicit indexing data structure through quadtree traversal, creating lists of <br>the registered image features directly in GPU memory, and virtually eliminating <br>bus transfers between CPU and GPU. With this novel concept, quadtrees can be <br>applied in real-time video processing on standard PC hardware. A multitude of <br>applications in image and video processing arises, since region quadtree <br>analysis becomes a light-weight preprocessing step for feature clustering in <br>vision tasks, motion vector analysis, PDE calculations, or data compression. 
In <br>a sidenote, we outline how this algorithm can be applied to 3D volume data, <br>effectively generating region octrees purely on graphics hardware. %B Real-Time Image Processing 2007 %E Kehtarnavaz, Nasser; Carlsohn, Matthias F. %P 1 - 11 %I SPIE %@ 9780819466099 %B SPIE %N 6496
Zayer, R., Lévy, B., and Seidel, H.-P. 2007. Linear Angle Based Parameterization. SGP 2007, Fifth Eurographics Symposium on Geometry Processing, Eurographics Association.
Abstract
In the field of mesh parameterization, the impact of angular and boundary distortion on parameterization quality have brought forward the need for robust and efficient free boundary angle preserving methods. One of the most prominent approaches in this direction is the Angle Based Flattening (ABF) which directly formulates the problem as a constrained nonlinear optimization in terms of angles. Since the original formulation of the ABF, a steady research effort has been dedicated to improving its efficiency. As for any well posed numerical problem, the solution is generally an approximation of the underlying mathematical equations. The economy and accuracy of the solution are to a great extent affected by the kind of approximation used. In this work we reformulate the problem based on the notion of error of estimation. A careful manipulation of the resulting equations yields for the first time a linear version of angle based parameterization. The error induced by this linearization is quadratic in terms of the error in angles and the validity of the approximation is further supported by numerical results. Besides performance speedup, the simplicity of the current setup makes re-implementation and reproduction of our results straightforward.
Export
BibTeX
@inproceedings{zls:linabp:07,
  TITLE     = {Linear Angle Based Parameterization},
  AUTHOR    = {Zayer, Rhaleb and L{\'e}vy, Bruno and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-46-3},
  DOI       = {10.2312/SGP/SGP07/135-141},
  LOCALID   = {Local-ID: C12573CC004A8E26-D8DFAA1EB1922B82C12572F4003CDE39-zls:linabp:07},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {In the field of mesh parameterization, the impact of angular and boundary distortion on parameterization quality have brought forward the need for robust and efficient free boundary angle preserving methods. One of the most prominent approaches in this direction is the Angle Based Flattening (ABF) which directly formulates the problem as a constrained nonlinear optimization in terms of angles. Since the original formulation of the ABF, a steady research effort has been dedicated to improving its efficiency. As for any well posed numerical problem, the solution is generally an approximation of the underlying mathematical equations. The economy and accuracy of the solution are to a great extent affected by the kind of approximation used. In this work we reformulate the problem based on the notion of error of estimation. A careful manipulation of the resulting equations yields for the first time a linear version of angle based parameterization. The error induced by this linearization is quadratic in terms of the error in angles and the validity of the approximation is further supported by numerical results. Besides performance speedup, the simplicity of the current setup makes re-implementation and reproduction of our results straightforward.},
  BOOKTITLE = {SGP 2007, Fifth Eurographics Symposium on Geometry Processing},
  EDITOR    = {Fellner, Dieter and Spencer, Stephen},
  PAGES     = {135--141},
  ADDRESS   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A L&#233;vy, Bruno %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Linear Angle Based Parameterization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FB6-E %F EDOC: 356557 %R 10.2312/SGP/SGP07/135-141 %F OTHER: Local-ID: C12573CC004A8E26-D8DFAA1EB1922B82C12572F4003CDE39-zls:linabp:07 %D 2007 %B Fifth Eurographics Symposium on Geometry Processing %Z date of event: 2007-07-04 - 2007-07-06 %C Barcelona, Spain %X In the field of mesh parameterization, the impact of angular and boundary distortion on parameterization quality have brought forward the need for robust and efficient free boundary angle preserving methods. One of the most prominent approaches in this direction is the Angle Based Flattening (ABF) which directly formulates the problem as a constrained nonlinear optimization in terms of angles. Since the original formulation of the ABF, a steady research effort has been dedicated to improving its efficiency. As for any well posed numerical problem, the solution is generally an approximation of the underlying mathematical equations. The economy and accuracy of the solution are to a great extent affected by the kind of approximation used. In this work we reformulate the problem based on the notion of error of estimation. A careful manipulation of the resulting equations yields for the first time a linear version of angle based parameterization. The error induced by this linearization is quadratic in terms of the error in angles and the validity of the approximation is further supported by numerical results. Besides performance speedup, the simplicity of the current setup makes re-implementation and reproduction of our results straightforward. %B SGP 2007 %E Fellner, Dieter; Spencer, Stephen %P 135 - 141 %I Eurographics Association %@ 978-3-905673-46-3
Yoshizawa, S., Belyaev, A., Yokota, H., and Seidel, H.-P. 2007a. Fast and Faithful Geometric Algorithm for Detecting Crest Lines on Meshes. Pacific Graphics 2007 (PG 2007), IEEE Computer Society.
Export
BibTeX
@inproceedings{YoshizawaPG2007,
  TITLE     = {Fast and Faithful Geometric Algorithm for Detecting Crest Lines on Meshes},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Yokota, Hideo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1550-4085},
  ISBN      = {0-7695-3009-5; 978-0-7695-3009-3},
  DOI       = {10.1109/PG.2007.24},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Pacific Graphics 2007 (PG 2007)},
  EDITOR    = {Alexa, Marc and Gortler, Steven and Ju, Tao},
  PAGES     = {231--237},
  ADDRESS   = {Maui, Hawaii, USA},
}
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Yokoto, Hideo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Faithful Geometric Algorithm for Detecting Crest Lines on Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D1AC-2 %R 10.1109/PG.2007.24 %D 2007 %B The Pacific Conference on Computer Graphics and Applications %Z date of event: 2007-10-29 - 2007-11-02 %C Maui, Hawaii, USA %B Pacific Graphics 2007 %E Alexa, Marc; Gortler, Steven; Ju, Tao %P 231 - 237 %I IEEE Computer Society %@ false
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2007b. Skeleton-based Variational Mesh Deformations. Computer Graphics Forum (Proc. EUROGRAPHICS 2007), Blackwell.
Export
BibTeX
@inproceedings{YoshizawaEG2007,
  TITLE     = {Skeleton-based Variational Mesh Deformations},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2007.01047.x},
  PUBLISHER = {Blackwell},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Eurographics 2007},
  EDITOR    = {Cohen-Or, Daniel and Slav{\'i}k, Pavel},
  PAGES     = {255--264},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {26},
  ISSUE     = {3},
  ADDRESS   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Skeleton-based Variational Mesh Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D1AE-D %R 10.1111/j.1467-8659.2007.01047.x %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %B Eurographics 2007 %E Cohen-Or, Daniel; Slav&#237;k, Pavel %P 255 - 264 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Yoshida, A., Blanz, V., Myszkowski, K., and Seidel, H.-P. 2007a. Testing Tone Mapping Operators with Human-perceived Reality. Journal of Electronic Imaging 16, 1.
Abstract
A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range (LDR) devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual systems (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on an LDR monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.
Export
BibTeX
@article{Yoshida-et-al_JEI07,
  TITLE     = {Testing Tone Mapping Operators with Human-perceived Reality},
  AUTHOR    = {Yoshida, Akiko and Blanz, Volker and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1017-9909},
  DOI       = {10.1117/1.2711822},
  LOCALID   = {Local-ID: C12573CC004A8E26-1BC207A1242FDBC1C1257222003A5012-Yoshida_JEI2007},
  PUBLISHER = {SPIE},
  ADDRESS   = {Bellingham, WA},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range (LDR) devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual systems (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on an LDR monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.},
  JOURNAL   = {Journal of Electronic Imaging},
  VOLUME    = {16},
  NUMBER    = {1},
  PAGES     = {1--14},
  EID       = {013004},
}
Endnote
%0 Journal Article %A Yoshida, Akiko %A Blanz, Volker %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Testing Tone Mapping Operators with Human-perceived Reality : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20EF-9 %F EDOC: 356603 %R 10.1117/1.2711822 %F OTHER: Local-ID: C12573CC004A8E26-1BC207A1242FDBC1C1257222003A5012-Yoshida_JEI2007 %D 2007 %* Review method: peer-reviewed %X A number of successful tone mapping operators for contrast compression have <br>been proposed due to the need to visualize high dynamic range (HDR) images on <br>low dynamic range (LDR) devices. They were inspired by fields as diverse as <br>image processing, photographic practice, and modeling of the human visual <br>systems (HVS). The variety of approaches calls for a systematic perceptual <br>evaluation of their performance.<br><br>We conduct a psychophysical experiment based on a direct comparison between the <br>appearance of real-world scenes and HDR images of these scenes displayed on an <br>LDR monitor. In our experiment, HDR images are tone mapped by seven existing <br>tone mapping operators. The primary interest of this psychophysical experiment <br>is to assess the differences in how tone mapped images are perceived by human <br>observers and to find out which attributes of image appearance account for <br>these differences when tone mapped images are compared directly with their <br>corresponding real-world scenes rather than with each other. 
The human<br>subjects rate image naturalness, overall contrast, overall brightness, and <br>detail reproduction in dark and bright image regions with respect to the <br>corresponding real-world scene.<br><br>The results indicate substantial differences in perception of images produced <br>by individual tone mapping operators. We observe a clear distinction between <br>global and local operators in favor of the latter, and we classify the tone <br>mapping operators according to naturalness and appearance attributes. %J IEEE Transactions on Medical Imaging %O IEEE Trans. Med. Imaging %V 16 %N 1 %& 1 %P 1 - 14 %Z sequence number: 013004 %I Institute of Electrical and Electronics Engineers %C New York, NY %@ false
Yoshida, A., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2007b. Perceptual Uniformity of Contrast Scaling in Complex Images. APGV 2007, Symposium on Applied Perception in Graphics and Visualization, ACM.
Export
BibTeX
@inproceedings{Yoshida_APGV2007,
  TITLE     = {Perceptual Uniformity of Contrast Scaling in Complex Images},
  AUTHOR    = {Yoshida, Akiko and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-670-7},
  DOI       = {10.1145/1272582.1272614},
  PUBLISHER = {ACM},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {APGV 2007, Symposium on Applied Perception in Graphics and Visualization},
  EDITOR    = {Wallraven, Christian and Sundstedt, Veronica and Fleming, Roland W. and Langer, Michael and Spencer, Stephen N.},
  PAGES     = {137--137},
  ADDRESS   = {T{\"u}bingen, Germany},
}
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Uniformity of Contrast Scaling in Complex Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D1B0-5 %R 10.1145/1272582.1272614 %D 2007 %B Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2007-07-25 - 2007-07-27 %C T&#252;bingen, Germany %B APGV 2007 %E Wallraven, Christian; Sundstedt, Veronica; Fleming, Roland W.; Langer, Michael; Spencer, Stephen N. %P 137 - 137 %I ACM %@ 978-1-59593-670-7
Yoon, M., Lee, Y., Lee, S., Ivrissimtzis, I., and Seidel, H.-P. 2007. Surface and Normal Ensembles for Surface Reconstruction. Computer-Aided Design39, 5.
Export
BibTeX
@article{Yoon-cad07,
  TITLE    = {Surface and Normal Ensembles for Surface Reconstruction},
  AUTHOR   = {Yoon, Mincheol and Lee, Yunjin and Lee, Seungyong and Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN     = {0010-4485},
  DOI      = {10.1016/j.cad.2007.02.008},
  LOCALID  = {Local-ID: C12573CC004A8E26-0B0A52D73BE6D676C1257299004FB9BF-Yoon-cad07},
  YEAR     = {2007},
  DATE     = {2007},
  JOURNAL  = {Computer-Aided Design},
  VOLUME   = {39},
  NUMBER   = {5},
  PAGES    = {408--420},
}
Endnote
%0 Journal Article %A Yoon, Mincheol %A Lee, Yunjin %A Lee, Seungyong %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Surface and Normal Ensembles for Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20E1-5 %F EDOC: 356593 %R doi:10.1016/j.cad.2007.02.008 %F OTHER: Local-ID: C12573CC004A8E26-0B0A52D73BE6D676C1257299004FB9BF-Yoon-cad07 %D 2007 %* Review method: peer-reviewed %J Computer-Aided Design %V 39 %N 5 %& 408 %P 408 - 420 %@ false
Weinkauf, T., Sahner, J., Theisel, H., Hege, H.-C., and Seidel, H.-P. 2007a. A Unified Feature Extraction Architecture. Active Flow Control : Papers contributed to the Conference Active Flow Control 2006, Springer.
Export
BibTeX
@inproceedings{Theisel2006AFC,
  TITLE     = {A Unified Feature Extraction Architecture},
  AUTHOR    = {Weinkauf, Tino and Sahner, Jan and Theisel, Holger and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-71438-5},
  DOI       = {10.1007/978-3-540-71439-2_8},
  LOCALID   = {Local-ID: C12573CC004A8E26-478880B114B751E4C1257235006012FC-Theisel2006AFC},
  PUBLISHER = {Springer},
  YEAR      = {2006},
  DATE      = {2007},
  BOOKTITLE = {Active Flow Control : Papers contributed to the Conference Active Flow Control 2006},
  EDITOR    = {King, Rudibert},
  PAGES     = {119--133},
  SERIES    = {Notes on Numerical Fluid Mechanics and Multidisciplinary Design (NNFM)},
}
Endnote
%0 Conference Proceedings %A Weinkauf, Tino %A Sahner, Jan %A Theisel, Holger %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Unified Feature Extraction Architecture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E4E-4 %F EDOC: 356608 %R 10.1007/978-3-540-71439-2_8 %F OTHER: Local-ID: C12573CC004A8E26-478880B114B751E4C1257235006012FC-Theisel2006AFC %I Springer %D 2007 %B Untitled Event %Z date of event: 2006-09-23 - 2006-09-29 %C Berlin, Germany %B Active Flow Control : Papers contributed to the Conference Active Flow Control 2006 %E King, Rudibert %P 119 - 133 %I Springer %@ 978-3-540-71438-5 %B Notes on Numerical Fluid Mechanics and Multidisciplinary Design (NNFM)
Weinkauf, T., Theisel, H., Hege, H.-C., and Seidel, H.-P. 2007b. Feature Flow Fields in Out-of-Core Settings. In: Topology-based Methods in Visualization. Springer, Berlin, Germany.
Export
BibTeX
@incollection{Weinkauf-et-al_TbMiV07,
  TITLE     = {Feature Flow Fields in Out-of-Core Settings},
  AUTHOR    = {Weinkauf, Tino and Theisel, Holger and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1612-3786},
  ISBN      = {978-3-540-70822-3},
  DOI       = {10.1007/978-3-540-70823-0_4},
  LOCALID   = {Local-ID: C12573CC004A8E26-526BF03E8627E887C125740F0033E35F-Weinkauf07},
  PUBLISHER = {Springer},
  ADDRESS   = {Berlin, Germany},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Topology-based Methods in Visualization},
  EDITOR    = {Hauser, Helwig and Hagen, Hans and Theisel, Holger},
  PAGES     = {51--63},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Weinkauf, Tino %A Theisel, Holger %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Flow Fields in Out-of-Core Settings : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F3A-8 %F EDOC: 356431 %F OTHER: Local-ID: C12573CC004A8E26-526BF03E8627E887C125740F0033E35F-Weinkauf07 %R 10.1007/978-3-540-70823-0_4 %D 2007 %B Topology-based Methods in Visualization %E Hauser, Helwig; Hagen, Hans; Theisel, Holger %P 51 - 63 %I Springer %C Berlin, Germany %@ 978-3-540-70822-3 %S Mathematics and Visualization %@ false %U https://rdcu.be/dILvE
Von Funck, W., Theisel, H., and Seidel, H.-P. 2007a. Explicit Control of Vector Field Based Shape Deformations. Pacific Graphics 2007 (PG 2007), IEEE.
Export
BibTeX
@inproceedings{Theisel-et-al_PG07,
  TITLE     = {Explicit Control of Vector Field Based Shape Deformations},
  AUTHOR    = {von Funck, Wolfram and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-7695-3009-3},
  DOI       = {10.1109/PG.2007.26},
  LOCALID   = {Local-ID: C125756E0038A185-6897D3B9C56998B4C125757F00329818-Funck2008},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Pacific Graphics 2007 (PG 2007)},
  EDITOR    = {Alexa, Marc and Gortler, Steven and Ju, Tao},
  PAGES     = {291--300},
  ADDRESS   = {Maui, HI, USA},
}
Endnote
%0 Conference Proceedings %A von Funck, Wolfram %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Explicit Control of Vector Field Based Shape Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2149-4 %F EDOC: 428250 %R 10.1109/PG.2007.26 %U http://dx.doi.org/10.1109/PG.2007.26 %F OTHER: Local-ID: C125756E0038A185-6897D3B9C56998B4C125757F00329818-Funck2008 %D 2007 %B 15th Pacific Conference on Computer Graphics and Applications %Z date of event: 2007-10-29 - 2007-11-02 %C Maui, HI, USA %B Pacific Graphics 2007 %E Alexa, Marc; Gortler, Steven; Ju, Tao %P 291 - 300 %I IEEE %@ 978-0-7695-3009-3
Von Funck, W., Theisel, H., and Seidel, H.-P. 2007b. Implicit Boundary Control of Vector Field Based Shape Deformations. Mathematics of Surfaces XII, Springer.
Abstract
We present a shape deformation approach which preserves volume, prevents self-intersections and allows for exact control of the deformation impact. The volume preservation and prevention of self-intersections are achieved by utilizing the method of Vector Field Based Shape Deformations. This method produces physically plausible deformations efficiently by integrating formally constructed divergence-free vector fields, where the region of influence is described by implicitly defined shapes. We introduce an implicit representation of deformation boundaries, which allows for an exact control of the deformation: By placing the boundaries directly on the shape surface, the user can specify precisely where the shape should be deformed and where not. The simple polygonal representation of the boundaries allows for a GPU implementation, which is able to deform high-resolution meshes in real-time.
Export
BibTeX
@inproceedings{Theisel-et-al_IMA07,
  TITLE     = {Implicit Boundary Control of Vector Field Based Shape Deformations},
  AUTHOR    = {von Funck, Wolfram and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-540-73842-8},
  DOI       = {10.1007/978-3-540-73843-5_10},
  LOCALID   = {Local-ID: C12573CC004A8E26-9E963D372BAEB847C12573FD002E56A9-FucnkTheiselSeidel2007},
  PUBLISHER = {Springer},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a shape deformation approach which preserves volume, prevents self-intersections and allows for exact control of the deformation impact. The volume preservation and prevention of self-intersections are achieved by utilizing the method of Vector Field Based Shape Deformations. This method produces physically plausible deformations efficiently by integrating formally constructed divergence-free vector fields, where the region of influence is described by implicitly defined shapes. We introduce an implicit representation of deformation boundaries, which allows for an exact control of the deformation: By placing the boundaries directly on the shape surface, the user can specify precisely where the shape should be deformed and where not. The simple polygonal representation of the boundaries allows for a GPU implementation, which is able to deform high-resolution meshes in real-time.},
  BOOKTITLE = {Mathematics of Surfaces XII},
  EDITOR    = {Martin, Ralph and Sabin, Malcolm and Winkler, Joab},
  PAGES     = {154--165},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4647},
  ADDRESS   = {Sheffield, UK},
}
Endnote
%0 Conference Proceedings %A von Funck, Wolfram %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Implicit Boundary Control of Vector Field Based Shape Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F82-4 %F EDOC: 356493 %R 10.1007/978-3-540-73843-5_10 %F OTHER: Local-ID: C12573CC004A8E26-9E963D372BAEB847C12573FD002E56A9-FucnkTheiselSeidel2007 %D 2007 %B 12th IMA International Conference %Z date of event: 2007-09-04 - 2007-09-06 %C Sheffield, UK %X We present a shape deformation approach which preserves<br>volume, prevents self-intersections and allows for exact control of the<br>deformation impact. The volume preservation and prevention of selfintersections<br>are achieved by utilizing the method of Vector Field Based<br>Shape Deformations. This method produces physically plausible deformations<br>efficiently by integrating formally constructed divergence-free<br>vector fields, where the region of influence is described by implicitly<br>defined shapes. We introduce an implicit representation of deformation<br>boundaries, which allows for an exact control of the deformation: By<br>placing the boundaries directly on the shape surface, the user can specify<br>precisely where the shape should be deformed and where not. The simple<br>polygonal representation of the boundaries allows for a GPU implementation,<br>which is able to deform high-resolution meshes in real-time. %B Mathematics of Surfaces XII %E Martin, Ralph; Sabin, Malcolm; Winkler, Joab %P 154 - 165 %I Springer %@ 3-540-73842-8 %B Lecture Notes in Computer Science %N 4647 %U https://rdcu.be/dIMzO
Von Funck, W., Theisel, H., and Seidel, H.-P. 2007c. Elastic Secondary Deformations by Vector Field Integration. SGP 2007, Fifth Eurographics Symposium on Geometry Processing, Eurographics Association.
Export
BibTeX
@inproceedings{FunckGP2007,
  TITLE     = {Elastic Secondary Deformations by Vector Field Integration},
  AUTHOR    = {von Funck, Wolfram and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-46-3},
  DOI       = {10.2312/SGP/SGP07/099-108},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {SGP 2007, Fifth Eurographics Symposium on Geometry Processing},
  EDITOR    = {Fellner, Dieter and Spencer, Stephen N.},
  PAGES     = {99--108},
  ADDRESS   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A von Funck, Wolfram %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Elastic Secondary Deformations by Vector Field Integration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D497-7 %R 10.2312/SGP/SGP07/099-108 %D 2007 %B Fifth Eurographics Symposium on Geometry Processing %Z date of event: 2007-07-04 - 2007-07-06 %C Barcelona, Spain %B SGP 2007 %E Fellner, Dieter; Spencer, Stephen N. %P 99 - 108 %I Eurographics Association %@ 978-3-905673-46-3
Theobalt, C., Ahmed, N., Lensch, H.P.A., Magnor, M., and Seidel, H.-P. 2007a. Seeing People in Different Light-joint Shape, Motion, and Reflectance Capture. IEEE Transactions on Visualization and Computer Graphics 13, 4.
Abstract
By means of passive optical motion capture real people can be authentically <br>animated and photo-realistically textured. To import real-world characters into <br>virtual environments, however, also surface reflectance properties must be <br>known. We describe a video-based modeling approach that captures human shape <br>and motion as well as reflectance characteristics from a handful of <br>synchronized video recordings. The presented method is able to recover <br>spatially varying surface reflectance properties of clothes from multi-view <br>video footage.The resulting model description enables us to realistically <br>reproduce the appearance of animated virtual actors under different lighting <br>conditions, as well as to interchange surface attributes among different <br>people, e.g. for virtual dressing.Our contribution can be used to create <br>\mbox{3D} renditions of real-world people under arbitrary novel lighting <br>conditions on standard graphics hardware.
Export
BibTeX
@article{Theobalt-et-al_TVCG07,
  TITLE     = {Seeing People in Different Light-joint Shape, Motion, and Reflectance Capture},
  AUTHOR    = {Theobalt, Christian and Ahmed, Naveed and Lensch, Hendrik P. A. and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2007.1006},
  LOCALID   = {Local-ID: C12573CC004A8E26-602E8F74F225396BC1257296004E7ED7-TheobaltTVCG2007},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {New York, NY},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {By means of passive optical motion capture real people can be authentically animated and photo-realistically textured. To import real-world characters into virtual environments, however, also surface reflectance properties must be known. We describe a video-based modeling approach that captures human shape and motion as well as reflectance characteristics from a handful of synchronized video recordings. The presented method is able to recover spatially varying surface reflectance properties of clothes from multi-view video footage. The resulting model description enables us to realistically reproduce the appearance of animated virtual actors under different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. Our contribution can be used to create \mbox{3D} renditions of real-world people under arbitrary novel lighting conditions on standard graphics hardware.},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME    = {13},
  NUMBER    = {4},
  PAGES     = {663--674},
}
Endnote
%0 Journal Article %A Theobalt, Christian %A Ahmed, Naveed %A Lensch, Hendrik P. A. %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Seeing People in Different Light-joint Shape, Motion, and Reflectance Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20A5-B %F EDOC: 356631 %R 10.1109/TVCG.2007.1006 %F OTHER: Local-ID: C12573CC004A8E26-602E8F74F225396BC1257296004E7ED7-TheobaltTVCG2007 %D 2007 %* Review method: peer-reviewed %X By means of passive optical motion capture real people can be authentically <br>animated and photo-realistically textured. To import real-world characters into <br>virtual environments, however, also surface reflectance properties must be <br>known. We describe a video-based modeling approach that captures human shape <br>and motion as well as reflectance characteristics from a handful of <br>synchronized video recordings. The presented method is able to recover <br>spatially varying surface reflectance properties of clothes from multi-view <br>video footage.The resulting model description enables us to realistically <br>reproduce the appearance of animated virtual actors under different lighting <br>conditions, as well as to interchange surface attributes among different <br>people, e.g. for virtual dressing.Our contribution can be used to create <br>\mbox{3D} renditions of real-world people under arbitrary novel lighting <br>conditions on standard graphics hardware. %J IEEE Transactions on Visualization and Computer Graphics %V 13 %N 4 %& 663 %P 663 - 674 %I IEEE Computer Society %C New York, NY %@ false
Theobalt, C., Rössl, C., de Aguiar, E., and Seidel, H.-P. 2007b. Animation Collage. SCA 07: Eurographics/SIGGRAPH Symposium on Computer Animation, Eurographics Association.
Abstract
We propose a method to automatically transform mesh animations into<br> animation collages, i.e. moving assemblies of shape primitives from<br> a database given by an artist. An animation collage is a complete<br> reassembly of the original animation in a new abstract visual style<br> that imitates the spatio-temporal shape and deformation of the<br> input. Our algorithm automatically decomposes input animations into<br> plausible approximately rigid segments and fits to each segment one<br> shape from the database by means of a spatio-temporal matching<br> procedure. The collage is then animated in compliance with the<br> original's shape and motion. Apart from proposing solutions to a<br> number of spatio-temporal alignment problems, this work is an<br> interesting add-on to the graphics artist's toolbox with many<br> applications in arts, non-photorealistic rendering, and animated<br> movie productions. We exemplify the beauty of animation collages by<br> showing results created with our software prototype.
Export
BibTeX
@inproceedings{Theobalt-et-al_SCA07,
  TITLE     = {Animation Collage},
  AUTHOR    = {Theobalt, Christian and R{\"o}ssl, Christian and de Aguiar, Edilson and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-44-9},
  DOI       = {10.2312/SCA/SCA07/271-280},
  LOCALID   = {Local-ID: C12573CC004A8E26-A2279DE021E999A2C12573710054C7CB-theobalt:sca:2007},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We propose a method to automatically transform mesh animations into animation collages, i.e. moving assemblies of shape primitives from a database given by an artist. An animation collage is a complete reassembly of the original animation in a new abstract visual style that imitates the spatio-temporal shape and deformation of the input. Our algorithm automatically decomposes input animations into plausible approximately rigid segments and fits to each segment one shape from the database by means of a spatio-temporal matching procedure. The collage is then animated in compliance with the original's shape and motion. Apart from proposing solutions to a number of spatio-temporal alignment problems, this work is an interesting add-on to the graphics artist's toolbox with many applications in arts, non-photorealistic rendering, and animated movie productions. We exemplify the beauty of animation collages by showing results created with our software prototype.},
  BOOKTITLE = {SCA 07: Eurographics/SIGGRAPH Symposium on Computer Animation},
  EDITOR    = {Fellner, Dieter and Spencer, Stephen},
  PAGES     = {271--280},
  ADDRESS   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A R&#246;ssl, Christian %A de Aguiar, Edilson %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Animation Collage : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E15-F %F EDOC: 356537 %R 10.2312/SCA/SCA07/271-280 %F OTHER: Local-ID: C12573CC004A8E26-A2279DE021E999A2C12573710054C7CB-theobalt:sca:2007 %D 2007 %B 2007 Eurographics/SIGGRAPH Symposium on Computer Animation %Z date of event: 2007-08-02 - 2007-08-04 %C San Diego, CA, USA %X We propose a method to automatically transform mesh animations into<br> animation collages, i.e. moving assemblies of shape primitives from<br> a database given by an artist. An animation collage is a complete<br> reassembly of the original animation in a new abstract visual style<br> that imitates the spatio-temporal shape and deformation of the<br> input. Our algorithm automatically decomposes input animations into<br> plausible approximately rigid segments and fits to each segment one<br> shape from the database by means of a spatio-temporal matching<br> procedure. The collage is then animated in compliance with the<br> original's shape and motion. Apart from proposing solutions to a<br> number of spatio-temporal alignment problems, this work is an<br> interesting add-on to the graphics artist's toolbox with many<br> applications in arts, non-photorealistic rendering, and animated<br> movie productions. We exemplify the beauty of animation collages by<br> showing results created with our software prototype. %B SCA 07: Eurographics/SIGGRAPH Symposium on Computer Animation %E Fellner, Dieter; Spencer, Stephen %P 271 - 280 %I Eurographics Association %@ 978-3-905673-44-9
Theobalt, C., Ahmed, N., Ziegler, G., and Seidel, H.-P. 2007c. High-Quality Reconstruction from Multiview Video Streams. IEEE Signal Processing Magazine 24, 6.
Export
BibTeX
@article{Theobalt-et-al_SPM07,
  TITLE     = {High-Quality Reconstruction from Multiview Video Streams},
  AUTHOR    = {Theobalt, Christian and Ahmed, Naveed and Ziegler, Gernot and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1053-5888},
  DOI       = {10.1109/MSP.2007.905701},
  LOCALID   = {Local-ID: C12573CC004A8E26-DE483CCBF527DC25C12573B1004BDB95-ISPM2006},
  PUBLISHER = {Institute of Electrical \& Electronics Engineers},
  ADDRESS   = {New York, NY},
  YEAR      = {2007},
  DATE      = {2007},
  JOURNAL   = {IEEE Signal Processing Magazine},
  VOLUME    = {24},
  NUMBER    = {6},
  PAGES     = {45--57},
}
Endnote
%0 Journal Article %A Theobalt, Christian %A Ahmed, Naveed %A Ziegler, Gernot %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High-Quality Reconstruction from Multiview Video Streams : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F6E-3 %F EDOC: 356553 %R 10.1109/MSP.2007.905701 %F OTHER: Local-ID: C12573CC004A8E26-DE483CCBF527DC25C12573B1004BDB95-ISPM2006 %D 2007 %* Review method: peer-reviewed %J IEEE Signal Processing Magazine %O IEEE Signal Process. Mag. %V 24 %N 6 %& 45 %P 45 - 57 %I Institute of Electrical & Electronics Engineers %C New York, NY %@ false
Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2007. On the Applicability of Topological Methods for Complex Flow Data. In: Topology-based Methods in Visualization. Springer, Berlin, Germany.
Export
BibTeX
@incollection{Theisel-et-al_TbMiV07,
  TITLE     = {On the Applicability of Topological Methods for Complex Flow Data},
  AUTHOR    = {Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1612-3786},
  ISBN      = {978-3-540-70822-3},
  DOI       = {10.1007/978-3-540-70823-0_8},
  LOCALID   = {Local-ID: C12573CC004A8E26-F0A5A191603EE4D0C125740F0036610B-Theisel07},
  PUBLISHER = {Springer},
  ADDRESS   = {Berlin, Germany},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Topology-based Methods in Visualization},
  EDITOR    = {Hauser, Helwig and Hagen, Hans and Theisel, Holger},
  PAGES     = {105--120},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T On the Applicability of Topological Methods for Complex Flow Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-201D-1 %F EDOC: 356430 %F OTHER: Local-ID: C12573CC004A8E26-F0A5A191603EE4D0C125740F0036610B-Theisel07 %R 10.1007/978-3-540-70823-0_8 %D 2007 %B Topology-based Methods in Visualization %E Hauser, Helwig; Hagen, Hans; Theisel, Holger %P 105 - 120 %I Springer %C Berlin, Germany %@ 978-3-540-70822-3 %S Mathematics and Visualization %@ false %U https://rdcu.be/dIxsK
Sunkel, M., Rosenhahn, B., and Seidel, H.-P. 2007. Silhouette Based Generic Model Adaptation for Marker-less Motion Capturing. Human Motion - Understanding, Modeling, Capture and Animation, Springer.
Abstract
This work presents a <br>marker-less motion capture system that <br>incorporates an approach to smoothly adapt a generic model mesh <br>to the individual shape of a tracked person. <br>This is done relying on extracted silhouettes only.<br>Thus, during the capture process the 3D model of a tracked person is learned.<br><br>Depending on a sparse number of 2D-3D correspondences, that are computed <br>along normal directions<br>from image sequences of different cameras,<br>a Laplacian mesh editing tool generates <br>the final adapted model. <br>With the increasing number of frames<br>an approach for temporal coherence reduces the effects of insufficient <br>correspondence data to a minimum and guarantees smooth adaptation results.<br>Further, we present experiments on non-optimal data that<br>show the robustness of our algorithm.
Export
BibTeX
@inproceedings{Sunkel-et-al_HM07,
  TITLE     = {Silhouette Based Generic Model Adaptation for Marker-less Motion Capturing},
  AUTHOR    = {Sunkel, Martin and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-540-75702-3},
  DOI       = {10.1007/978-3-540-75703-0_9},
  LOCALID   = {Local-ID: C12573CC004A8E26-FA184D37CF20CBC9C12573C9004A390B-Sunkel2007},
  PUBLISHER = {Springer},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {This work presents a marker-less motion capture system that incorporates an approach to smoothly adapt a generic model mesh to the individual shape of a tracked person. This is done relying on extracted silhouettes only. Thus, during the capture process the 3D model of a tracked person is learned. Depending on a sparse number of 2D-3D correspondences, that are computed along normal directions from image sequences of different cameras, a Laplacian mesh editing tool generates the final adapted model. With the increasing number of frames an approach for temporal coherence reduces the effects of insufficient correspondence data to a minimum and guarantees smooth adaptation results. Further, we present experiments on non-optimal data that show the robustness of our algorithm.},
  BOOKTITLE = {Human Motion -- Understanding, Modeling, Capture and Animation},
  EDITOR    = {Elgammal, Ahmed and Rosenhahn, Bodo and Klette, Reinhard},
  PAGES     = {119--135},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4814},
  ADDRESS   = {Rio de Janeiro, Brazil},
}
Endnote
%0 Conference Proceedings %A Sunkel, Martin %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Silhouette Based Generic Model Adaptation for Marker-less Motion Capturing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20B1-F %F EDOC: 356512 %R 10.1007/978-3-540-75703-0_9 %F OTHER: Local-ID: C12573CC004A8E26-FA184D37CF20CBC9C12573C9004A390B-Sunkel2007 %D 2007 %B Second Workshop on Human Motion %Z date of event: 2007-10-20 - 2007-10-20 %C Rio de Janeiro, Brazil %X This work presents a <br>marker-less motion capture system that <br>incorporates an approach to smoothly adapt a generic model mesh <br>to the individual shape of a tracked person. <br>This is done relying on extracted silhouettes only.<br>Thus, during the capture process the 3D model of a tracked person is learned.<br><br>Depending on a sparse number of 2D-3D correspondences, that are computed <br>along normal directions<br>from image sequences of different cameras,<br>a Laplacian mesh editing tool generates <br>the final adapted model. <br>With the increasing number of frames<br>an approach for temporal coherence reduces the effects of insufficient <br>correspondence data to a minimum and guarantees smooth adaptation results.<br>Further, we present experiments on non-optimal data that<br>show the robustness of our algorithm. %B Human Motion - Understanding, Modeling, Capture and Animation %E Elgammal, Ahmed; Rosenhahn, Bodo; Klette, Reinhard %P 119 - 135 %I Springer %@ 3-540-75702-3 %B Lecture Notes in Computer Science %N 4814 %U https://rdcu.be/dIMML
Stoll, C., de Aguiar, E., Theobalt, C., and Seidel, H.-P. 2007. A volumetric approach to interactive shape editing. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present a novel approach to real-time shape editing that produces physically plausible deformations using an efficient and easy-to-implement volumetric approach. Our algorithm alternates between a linear tetrahedral Laplacian deformation step and a differential update in which rotational transformations are approximated. By means of this iterative process we can achieve non-linear deformation results while having to solve only linear equation systems. The differential update step relies on estimating the rotational component of the deformation relative to the rest pose. This makes the method very stable as the shape can be reverted to its rest pose even after extreme deformations. Only a few point handles or area handles imposing an orientation are needed to achieve high quality deformations, which makes the approach intuitive to use. We show that our technique is well suited for interactive shape manipulation and also provides an elegant way to animate models with captured motion data.
Export
BibTeX
@techreport{Stoll2007,
  TITLE       = {A volumetric approach to interactive shape editing},
  AUTHOR      = {Stoll, Carsten and de Aguiar, Edilson and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-004},
  NUMBER      = {MPI-I-2007-4-004},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {We present a novel approach to real-time shape editing that produces physically plausible deformations using an efficient and easy-to-implement volumetric approach. Our algorithm alternates between a linear tetrahedral Laplacian deformation step and a differential update in which rotational transformations are approximated. By means of this iterative process we can achieve non-linear deformation results while having to solve only linear equation systems. The differential update step relies on estimating the rotational component of the deformation relative to the rest pose. This makes the method very stable as the shape can be reverted to its rest pose even after extreme deformations. Only a few point handles or area handles imposing an orientation are needed to achieve high quality deformations, which makes the approach intuitive to use. We show that our technique is well suited for interactive shape manipulation and also provides an elegant way to animate models with captured motion data.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Stoll, Carsten %A de Aguiar, Edilson %A Theobalt, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A volumetric approach to interactive shape editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66D6-4 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-004 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %P 28 p. %X We present a novel approach to real-time shape editing that produces physically plausible deformations using an efficient and easy-to-implement volumetric approach. Our algorithm alternates between a linear tetrahedral Laplacian deformation step and a differential update in which rotational transformations are approximated. By means of this iterative process we can achieve non-linear deformation results while having to solve only linear equation systems. The differential update step relies on estimating the rotational component of the deformation relative to the rest pose. This makes the method very stable as the shape can be reverted to its rest pose even after extreme deformations. Only a few point handles or area handles imposing an orientation are needed to achieve high quality deformations, which makes the approach intuitive to use. We show that our technique is well suited for interactive shape manipulation and also provides an elegant way to animate models with captured motion data. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Song, W., Belyaev, A., and Seidel, H.-P. 2007a. Automatic Generation of Bas-reliefs from 3D Shapes. IEEE International Conference on Shape Modeling and Applications 2007 (SMI 2007), IEEE Computer Society.
Abstract
In this paper, we introduce and study a new problem of converting a given 3D <br>shape (or a 2.5D range data) into a bas-relief. The problem can be considered <br>as a geometry counterpart of the HDR image compression problem widely studied <br>in computer graphics. In our approach to the shape bas-reliefing problem, we <br>combine the concepts of mesh saliency, shape exaggerating, and discrete <br>differential coordinates. The final bas-relief has a small width, preserves <br>salient features of the original 3D shape, and, therefore, can be used for <br>shape decorating purposes.
Export
BibTeX
@inproceedings{Song-et-al_SMI07,
  TITLE     = {Automatic Generation of Bas-reliefs from {3D} Shapes},
  AUTHOR    = {Song, Wenhao and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2815-5},
  DOI       = {10.1109/SMI.2007.9},
  LOCALID   = {Local-ID: C12573CC004A8E26-01F0B4BD79603BDBC125730E0030A2F9-SongBelyaevSeidel2007},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {In this paper, we introduce and study a new problem of converting a given 3D shape (or a 2.5D range data) into a bas-relief. The problem can be considered as a geometry counterpart of the HDR image compression problem widely studied in computer graphics. In our approach to the shape bas-reliefing problem, we combine the concepts of mesh saliency, shape exaggerating, and discrete differential coordinates. The final bas-relief has a small width, preserves salient features of the original 3D shape, and, therefore, can be used for shape decorating purposes.},
  BOOKTITLE = {IEEE International Conference on Shape Modeling and Applications 2007 (SMI 2007)},
  PAGES     = {211--214},
  ADDRESS   = {Lyon, France},
}
Endnote
%0 Conference Proceedings %A Song, Wenhao %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Generation of Bas-reliefs from 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E62-3 %F EDOC: 356544 %F OTHER: Local-ID: C12573CC004A8E26-01F0B4BD79603BDBC125730E0030A2F9-SongBelyaevSeidel2007 %R 10.1109/SMI.2007.9 %D 2007 %B IEEE International Conference on Shape Modeling and Applications 2007 %Z date of event: 2007-06-13 - 2007-06-15 %C Lyon, France %X In this paper, we introduce and study a new problem of converting a given 3D <br>shape (or a 2.5D range data) into a bas-relief. The problem can be considered <br>as a geometry counterpart of the HDR image compression problem widely studied <br>in computer graphics. In our approach to the shape bas-reliefing problem, we <br>combine the concepts of mesh saliency, shape exaggerating, and discrete <br>differential coordinates. The final bas-relief has a small width, preserves <br>salient features of the original 3D shape, and, therefore, can be used for <br>shape decorating purposes. %B IEEE International Conference on Shape Modeling and Applications 2007 %P 211 - 214 %I IEEE Computer Society %@ 0-7695-2815-5
Song, M., Dong, Z., Theobalt, C., Wang, H., Liu, Z., and Seidel, H.-P. 2007b. A General Framework for Efficient 2D and 3D Facial Expression Analogy. IEEE Transactions on Multimedia 9, 7.
Abstract
Facial expression analogy provides computer animation<br>professionals with a tool to map expressions of an arbitrary<br>source face onto an arbitrary target face. In the recent past,<br>several algorithms have been presented in the literature that aim<br>at putting the expression analogy paradigm into practice. Some<br>of these methods exclusively handle expression mapping between<br>3D face models, while others enable the transfer of expressions<br>between images of faces only. None of them, however, represents<br>a more general framework that can be applied to either of<br>these two face representations. In this paper, we describe a <br>novel generic method for analogy-based facial animation that <br>employs the same efficient framework to transfer facial <br>expressions between arbitrary 3D face models, as well as between <br>images of performer’s faces. We propose a novel geometry encoding<br>for triangle meshes, vertex-tent-coordinates, that enables us to<br>formulate expression transfer in the 2D and the 3D case as a<br>solution to a simple system of linear equations. Our experiments<br>show that our method outperforms many previous analogy-based<br>animation approaches in terms of achieved animation quality,<br>computation time and generality.
Export
BibTeX
@article{Song-et-al_TM07,
  TITLE     = {A General Framework for Efficient {2D} and {3D} Facial Expression Analogy},
  AUTHOR    = {Song, Mingli and Dong, Zhao and Theobalt, Christian and Wang, Huiqiong and Liu, Zicheng and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1520-9210},
  DOI       = {10.1109/TMM.2007.906591},
  LOCALID   = {Local-ID: C12573CC004A8E26-1A5AA9E45D0E4FA8C125731A00516457-SDC*2007},
  PUBLISHER = {Institute of Electrical and Electronics Engineers},
  ADDRESS   = {Piscataway, NJ},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Facial expression analogy provides computer animation professionals with a tool to map expressions of an arbitrary source face onto an arbitrary target face. In the recent past, several algorithms have been presented in the literature that aim at putting the expression analogy paradigm into practice. Some of these methods exclusively handle expression mapping between 3D face models, while others enable the transfer of expressions between images of faces only. None of them, however, represents a more general framework that can be applied to either of these two face representations. In this paper, we describe a novel generic method for analogy-based facial animation that employs the same efficient framework to transfer facial expressions between arbitrary 3D face models, as well as between images of performer{\textquoteright}s faces. We propose a novel geometry encoding for triangle meshes, vertex-tent-coordinates, that enables us to formulate expression transfer in the 2D and the 3D case as a solution to a simple system of linear equations. Our experiments show that our method outperforms many previous analogy-based animation approaches in terms of achieved animation quality, computation time and generality.},
  JOURNAL   = {IEEE Transactions on Multimedia},
  VOLUME    = {9},
  NUMBER    = {7},
  PAGES     = {1384--1395},
}
Endnote
%0 Journal Article %A Song, Mingli %A Dong, Zhao %A Theobalt, Christian %A Wang, Huiqiong %A Liu, Zicheng %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A General Framework for Efficient 2D and 3D Facial Expression Analogy : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1DEA-A %F EDOC: 356541 %R 10.1109/TMM.2007.906591 %F OTHER: Local-ID: C12573CC004A8E26-1A5AA9E45D0E4FA8C125731A00516457-SDC*2007 %D 2007 %* Review method: peer-reviewed %X Facial expression analogy provides computer animation<br>professionals with a tool to map expressions of an arbitrary<br>source face onto an arbitrary target face. In the recent past,<br>several algorithms have been presented in the literature that aim<br>at putting the expression analogy paradigm into practice. Some<br>of these methods exclusively handle expression mapping between<br>3D face models, while others enable the transfer of expressions<br>between images of faces only. None of them, however, represents<br>a more general framework that can be applied to either of<br>these two face representations. In this paper, we describe a <br>novel generic method for analogy-based facial animation that <br>employs the same efficient framework to transfer facial <br>expressions between arbitrary 3D face models, as well as between <br>images of performer&#8217;s faces. We propose a novel geometry encoding<br>for triangle meshes, vertex-tent-coordinates, that enables us to<br>formulate expression transfer in the 2D and the 3D case as a<br>solution to a simple system of linear equations. 
Our experiments<br>show that our method outperforms many previous analogy-based<br>animation approaches in terms of achieved animation quality,<br>computation time and generality. %J IEEE Transactions on Multimedia %V 9 %N 7 %& 1384 %P 1384 - 1395 %I Institute of Electrical and Electronics Engineers %C Piscataway, NJ %@ false
Slesareva, N., Bühler, T., Hagenburg, K.U., et al. 2007. Robust Variational Reconstruction from Multiple Views. Image Analysis (SCIA 2007), Springer.
Export
BibTeX
% Conference paper: Slesareva et al., SCIA 2007, LNCS 4522, Springer.
@inproceedings{DBLP:conf/scia/SlesarevaBHWBKS07,
  author    = {Slesareva, Natalia and B{\"u}hler, Thomas and Hagenburg, Kai Uwe and Weickert, Joachim and Bruhn, Andr{\'e}s and Karni, Zachi and Seidel, Hans-Peter},
  title     = {Robust Variational Reconstruction from Multiple Views},
  booktitle = {Image Analysis (SCIA 2007)},
  editor    = {Ersb{\o}ll, Bjarne K. and Pedersen, Kim Steenstrup},
  series    = {Lecture Notes in Computer Science},
  volume    = {4522},
  pages     = {173--182},
  publisher = {Springer},
  address   = {Aalborg, Denmark},
  year      = {2007},
  date      = {2007},
  language  = {eng},
  isbn      = {978-3-540-73039-2},
  doi       = {10.1007/978-3-540-73040-8_18},
}
Endnote
%0 Conference Proceedings %A Slesareva, Natalia %A B&#252;hler, Thomas %A Hagenburg, Kai Uwe %A Weickert, Joachim %A Bruhn, Andr&#233;s %A Karni, Zachi %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Robust Variational Reconstruction from Multiple Views : %G eng %U http://hdl.handle.net/21.11116/0000-000F-503B-5 %R 10.1007/978-3-540-73040-8_18 %D 2007 %B 15th Scandinavian Conference on Image Analysis %Z date of event: 2007-06-10 - 2007-06-24 %C Aalborg, Denmark %B Image Analysis %E Ersb&#248;ll, Bjarne K.; Pedersen, Kim Steenstrup %P 173 - 182 %I Springer %@ 978-3-540-73039-2 %B Lecture Notes in Computer Science %N 4522 %U https://rdcu.be/dILP6
Shi, K., Theisel, H., Seidel, H.-P., Hauser, H., Hege, H.-C., and Weinkauf, T. 2007. Extracting Separation Surfaces of Path Line Oriented Topology in Periodic 2D Time-Dependent Vector Fields. Journal of WSCG15, 1-3.
Abstract
This paper presents an approach to extracting the separation surfaces from <br>periodic 2D time-dependent vector fields based on a<br>recently introduced path line oriented topology. This topology is based on <br>critical path lines which repeat the same spatial cycle<br>per time period. Around those path lines there are areas of similar asymptotic <br>flow behavior (basins) which are captured by a<br>2D Poincaré map as a discrete dynamical system. Due to pseudo discontinuities <br>in this map and the discrete integration scheme,<br>separatrices between the basins can’t be obtained as integral curves. Instead <br>we choose a point-wise approach to segment the<br>Poincaré map and apply image analysis algorithms to extract the 2D separation <br>curves. Starting from those curves we integrate<br>separation surfaces which partition the periodic 2D time-dependent vector field <br>into areas of similar path line behavior. We<br>apply our approach to a number of data sets to demonstrate its utility.
Export
BibTeX
% Journal article: Shi, Theisel, Seidel, Hauser, Hege, Weinkauf,
% Journal of WSCG 15(1-3):75-82, 2007. No DOI on record; identified via
% ISSN and the repository LOCALID. ABSTRACT retains literal <br> markup
% from the source repository export — intentional, do not strip.
@article{Shi-et-al_JWSCG07, TITLE = {Extracting Separation Surfaces of Path Line Oriented Topology in Periodic {2D} Time-Dependent Vector Fields}, AUTHOR = {Shi, Kuangyu and Theisel, Holger and Seidel, Hans-Peter and Hauser, Helwig and Hege, Hans-Christian and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1213-6972}, LOCALID = {Local-ID: C12573CC004A8E26-5EFDD7A180FF14FBC125739A004B3B1B-Shi2006}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {This paper presents an approach to extracting the separation surfaces from <br>periodic 2D time-dependent vector fields based on a<br>recently introduced path line oriented topology. This topology is based on <br>critical path lines which repeat the same spatial cycle<br>per time period. Around those path lines there are areas of similar asymptotic <br>flow behavior (basins) which are captured by a<br>2D Poincar{\'e} map as a discrete dynamical system. Due to pseudo discontinuities <br>in this map and the discrete integration scheme,<br>separatrices between the basins can{\textquoteright}t be obtained as integral curves. Instead <br>we choose a point-wise approach to segment the<br>Poincar{\'e} map and apply image analysis algorithms to extract the 2D separation <br>curves. Starting from those curves we integrate<br>separation surfaces which partition the periodic 2D time-dependent vector field <br>into areas of similar path line behavior. We<br>apply our approach to a number of data sets to demonstrate its utility.}, JOURNAL = {Journal of WSCG}, VOLUME = {15}, NUMBER = {1-3}, PAGES = {75--82}, }
Endnote
%0 Journal Article %A Shi, Kuangyu %A Theisel, Holger %A Seidel, Hans-Peter %A Hauser, Helwig %A Hege, Hans-Christian %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Extracting Separation Surfaces of Path Line Oriented Topology in Periodic 2D Time-Dependent Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F2D-6 %F EDOC: 356573 %F OTHER: Local-ID: C12573CC004A8E26-5EFDD7A180FF14FBC125739A004B3B1B-Shi2006 %D 2007 %* Review method: peer-reviewed %X This paper presents an approach to extracting the separation surfaces from <br>periodic 2D time-dependent vector fields based on a<br>recently introduced path line oriented topology. This topology is based on <br>critical path lines which repeat the same spatial cycle<br>per time period. Around those path lines there are areas of similar asymptotic <br>flow behavior (basins) which are captured by a<br>2D Poincar&#233; map as a discrete dynamical system. Due to pseudo discontinuities <br>in this map and the discrete integration scheme,<br>separatrices between the basins can&#8217;t be obtained as integral curves. Instead <br>we choose a point-wise approach to segment the<br>Poincar&#233; map and apply image analysis algorithms to extract the 2D separation <br>curves. Starting from those curves we integrate<br>separation surfaces which partition the periodic 2D time-dependent vector field <br>into areas of similar path line behavior. We<br>apply our approach to a number of data sets to demonstrate its utility. %J Journal of WSCG %V 15 %N 1-3 %& 75 %P 75 - 82 %@ false
Schultz, T., Theisel, H., and Seidel, H.-P. 2007a. Segmentation of DT-MRI Anisotropy Isosurfaces. EuroVis07: Joint Eurographics - IEEE VGTC Symposium on Visualization, Eurographics Association.
Abstract
While isosurfaces of anisotropy measures for data from diffusion<br> tensor magnetic resonance imaging (DT-MRI) are known to depict major<br> anatomical structures, the anisotropy metric reduces the rich tensor<br> data to a simple scalar field. In this work, we suggest that the<br> part of the data which has been ignored by the metric can be used to<br> segment anisotropy isosurfaces into anatomically meaningful regions.<br> For the implementation, we propose an edge-based watershed method<br> that adapts and extends a method from curvature-based mesh<br> segmentation. Finally, we use the segmentation<br> results to enhance visualization of the data.
Export
BibTeX
% Conference paper: Schultz, Theisel, Seidel, EuroVis 2007, pp. 187-194,
% Eurographics Association. ADDRESS holds the event venue (Norrköping),
% per this repository's export convention, not the publisher city.
% ABSTRACT retains literal <br> markup from the source export — intentional.
@inproceedings{Schultz-et-al_EUROVIS07, TITLE = {Segmentation of {DT}-{MRI} Anisotropy Isosurfaces}, AUTHOR = {Schultz, Thomas and Theisel, Holger and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {1-56881-362-2}, DOI = {10.2312/VisSym/EuroVis07/187-194}, LOCALID = {Local-ID: C12573CC004A8E26-3E3C7BEB653B3738C125729200502DF3-Schultz2007EV}, PUBLISHER = {Eurographics Association}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {While isosurfaces of anisotropy measures for data from diffusion<br> tensor magnetic resonance imaging (DT-MRI) are known to depict major<br> anatomical structures, the anisotropy metric reduces the rich tensor<br> data to a simple scalar field. In this work, we suggest that the<br> part of the data which has been ignored by the metric can be used to<br> segment anisotropy isosurfaces into anatomically meaningful regions.<br> For the implementation, we propose an edge-based watershed method<br> that adapts and extends a method from curvature-based mesh<br> segmentation. Finally, we use the segmentation<br> results to enhance visualization of the data.}, BOOKTITLE = {EuroVis07: Joint Eurographics -- IEEE VGTC Symposium on Visualization}, EDITOR = {Fellner, Dieter and M{\"o}ller, Torsten}, PAGES = {187--194}, SERIES = {VisSym/ EUROVIS: Joint Eurographics -- IEEE VGTC Symposium on Visualization}, ADDRESS = {Norrk{\"o}ping, Sweden}, }
Endnote
%0 Conference Proceedings %A Schultz, Thomas %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Segmentation of DT-MRI Anisotropy Isosurfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20A8-5 %F EDOC: 356605 %R 10.2312/VisSym/EuroVis07/187-194 %F OTHER: Local-ID: C12573CC004A8E26-3E3C7BEB653B3738C125729200502DF3-Schultz2007EV %D 2007 %B EuroVis07: Joint Eurographics - IEEE VGTC Symposium on Visualization %Z date of event: 2007-05-23 - 2007-05-25 %C Norrk&#246;ping, Sweden %X While isosurfaces of anisotropy measures for data from diffusion<br> tensor magnetic resonance imaging (DT-MRI) are known to depict major<br> anatomical structures, the anisotropy metric reduces the rich tensor<br> data to a simple scalar field. In this work, we suggest that the<br> part of the data which has been ignored by the metric can be used to<br> segment anisotropy isosurfaces into anatomically meaningful regions.<br> For the implementation, we propose an edge-based watershed method<br> that adapts and extends a method from curvature-based mesh<br> segmentation. Finally, we use the segmentation<br> results to enhance visualization of the data. %B EuroVis07: Joint Eurographics - IEEE VGTC Symposium on Visualization %E Fellner, Dieter; M&#246;ller, Torsten %P 187 - 194 %I Eurographics Association %@ 1-56881-362-2 %B VisSym/ EUROVIS: Joint Eurographics - IEEE VGTC Symposium on Visualization
Schultz, T., Theisel, H., and Seidel, H.-P. 2007b. Topological Visualization of Brain Diffusion MRI Data. IEEE Transactions on Visualization and Computer Graphics13, 6.
Abstract
Topological methods give concise and expressive visual<br> representations of flow fields. The present work suggests a<br> comparable method for the visualization of human brain diffusion MRI<br> data. We explore existing techniques for the topological analysis of<br> generic tensor fields, but find them inappropriate for diffusion MRI<br> data. Thus, we propose a novel approach that considers the<br> asymptotic behavior of a probabilistic fiber tracking method and<br> define analogs of the basic concepts of flow topology, like critical<br> points, basins, and faces, with interpretations in terms of brain<br> anatomy. The resulting features are fuzzy, reflecting the<br> uncertainty inherent in any connectivity estimate from diffusion<br> imaging. We describe an algorithm to extract the new type of<br> features, demonstrate its robustness under noise, and present<br> results for two regions in a diffusion MRI dataset to illustrate<br> that the method allows a meaningful visual analysis of probabilistic<br> fiber tracking results.
Export
BibTeX
% Journal article: Schultz, Theisel, Seidel, IEEE TVCG 13(6):1496-1503, 2007.
% ABSTRACT retains literal <br> markup from the source export — intentional.
@article{Schultz-et-al_TVCG07, TITLE = {Topological Visualization of Brain Diffusion {MRI} Data}, AUTHOR = {Schultz, Thomas and Theisel, Holger and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2007.70602}, LOCALID = {Local-ID: C12573CC004A8E26-2A70F8DA200F18B1C12573AE0050EDD3-Schultz2007Vis}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Topological methods give concise and expressive visual<br> representations of flow fields. The present work suggests a<br> comparable method for the visualization of human brain diffusion MRI<br> data. We explore existing techniques for the topological analysis of<br> generic tensor fields, but find them inappropriate for diffusion MRI<br> data. Thus, we propose a novel approach that considers the<br> asymptotic behavior of a probabilistic fiber tracking method and<br> define analogs of the basic concepts of flow topology, like critical<br> points, basins, and faces, with interpretations in terms of brain<br> anatomy. The resulting features are fuzzy, reflecting the<br> uncertainty inherent in any connectivity estimate from diffusion<br> imaging. We describe an algorithm to extract the new type of<br> features, demonstrate its robustness under noise, and present<br> results for two regions in a diffusion MRI dataset to illustrate<br> that the method allows a meaningful visual analysis of probabilistic<br> fiber tracking results.}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {13}, NUMBER = {6}, PAGES = {1496--1503}, }
Endnote
%0 Journal Article %A Schultz, Thomas %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Topological Visualization of Brain Diffusion MRI Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-210F-A %F EDOC: 356534 %R 10.1109/TVCG.2007.70602 %F OTHER: Local-ID: C12573CC004A8E26-2A70F8DA200F18B1C12573AE0050EDD3-Schultz2007Vis %D 2007 %* Review method: peer-reviewed %X Topological methods give concise and expressive visual<br> representations of flow fields. The present work suggests a<br> comparable method for the visualization of human brain diffusion MRI<br> data. We explore existing techniques for the topological analysis of<br> generic tensor fields, but find them inappropriate for diffusion MRI<br> data. Thus, we propose a novel approach that considers the<br> asymptotic behavior of a probabilistic fiber tracking method and<br> define analogs of the basic concepts of flow topology, like critical<br> points, basins, and faces, with interpretations in terms of brain<br> anatomy. The resulting features are fuzzy, reflecting the<br> uncertainty inherent in any connectivity estimate from diffusion<br> imaging. We describe an algorithm to extract the new type of<br> features, demonstrate its robustness under noise, and present<br> results for two regions in a diffusion MRI dataset to illustrate<br> that the method allows a meaningful visual analysis of probabilistic<br> fiber tracking results. %J IEEE Transactions on Visualization and Computer Graphics %V 13 %N 6 %& 1496 %P 1496 - 1503 %I IEEE Computer Society %C New York, NY %@ false
Scherbaum, K., Sunkel, M., Seidel, H.-P., and Blanz, V. 2007. Prediction of Individual Non-linear Aging Trajectories of Faces. Computer Graphics Forum, Blackwell.
Abstract
Represented in a Morphable Model, 3D faces follow curved trajectories <br>in face space as they age.<br>We present a novel algorithm that computes the individual aging<br>trajectories for given faces, based on a non-linear <br>function that assigns an age to each face vector. <br>This function is learned from a database of 3D scans of teenagers and adults<br>using support vector regression.<br><br>To apply the aging prediction to images of faces, we <br>reconstruct a 3D model from the input image, <br>apply the aging transformation on both shape and texture,<br>and then render the face back into the same image or into<br>images of other individuals at the appropriate ages,<br>for example images of older children.<br>Among other applications, our system can help to find missing children.
Export
BibTeX
% Conference paper published as a Computer Graphics Forum 26(3) special issue
% (Eurographics 2007), hence both BOOKTITLE and JOURNAL fields.
% FIX(review): the original entry carried the PUBLISHER field twice
% ({Blackwell} then {Blackwell-Wiley}); BibTeX rejects a repeated field
% within one entry. Kept {Blackwell}, matching the proceedings publisher
% in the accompanying Endnote record (%I Blackwell).
% ABSTRACT retains literal <br> markup from the source export — intentional.
@inproceedings{Scherbaum-et-al_Eurographics07, TITLE = {Prediction of Individual Non-linear Aging Trajectories of Faces}, AUTHOR = {Scherbaum, Kristina and Sunkel, Martin and Seidel, Hans-Peter and Blanz, Volker}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2007.01050.x}, LOCALID = {Local-ID: C12573CC004A8E26-AE538B82F11CC3D4C12573AE0055AD9E-ScheSunBlaSei07}, PUBLISHER = {Blackwell}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Represented in a Morphable Model, 3D faces follow curved trajectories <br>in face space as they age.<br>We present a novel algorithm that computes the individual aging<br>trajectories for given faces, based on a non-linear <br>function that assigns an age to each face vector. <br>This function is learned from a database of 3D scans of teenagers and adults<br>using support vector regression.<br><br>To apply the aging prediction to images of faces, we <br>reconstruct a 3D model from the input image, <br>apply the aging transformation on both shape and texture,<br>and then render the face back into the same image or into<br>images of other individuals at the appropriate ages,<br>for example images of older children.<br>Among other applications, our system can help to find missing children.}, BOOKTITLE = {Eurographics 2007}, EDITOR = {Cohen-Or, Daniel and Slavik, Pavel}, PAGES = {285--294}, JOURNAL = {Computer Graphics Forum}, VOLUME = {26}, ISSUE = {3}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Scherbaum, Kristina %A Sunkel, Martin %A Seidel, Hans-Peter %A Blanz, Volker %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Prediction of Individual Non-linear Aging Trajectories of Faces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2061-3 %F EDOC: 356528 %R 10.1111/j.1467-8659.2007.01050.x %F OTHER: Local-ID: C12573CC004A8E26-AE538B82F11CC3D4C12573AE0055AD9E-ScheSunBlaSei07 %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X Represented in a Morphable Model, 3D faces follow curved trajectories <br>in face space as they age.<br>We present a novel algorithm that computes the individual aging<br>trajectories for given faces, based on a non-linear <br>function that assigns an age to each face vector. <br>This function is learned from a database of 3D scans of teenagers and adults<br>using support vector regression.<br><br>To apply the aging prediction to images of faces, we <br>reconstruct a 3D model from the input image, <br>apply the aging transformation on both shape and texture,<br>and then render the face back into the same image or into<br>images of other individuals at the appropriate ages,<br>for example images of older children.<br>Among other applications, our system can help to find missing children. %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 285 - 294 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Schall, O., Belyaev, A., and Seidel, H.-P. 2007a. Feature-preserving Non-local Denoising of Static and Time-varying Range Data. SPM ’07: Proceedings of the 2007 ACM symposium on Solid and physical modeling, ACM.
Abstract
We present a new method for noise removal on static and time-varying range <br>data. Our approach predicts the restored position of a perturbed vertex using <br>similar vertices in its neighborhood. It defines the required similarity <br>measure in a new non-local fashion which compares regions of the surface <br>instead of point pairs. This allows our algorithm to obtain a more accurate <br>denoising result than previous state-of-the-art approaches and, at the same <br>time, to better preserve fine features of the surface. Furthermore, our <br>approach is easy to implement, effective, and flexibly applicable to different <br>types of scanned data. We demonstrate this on several static and interesting <br>new time-varying datasets obtained using laser and structured light scanners.
Export
BibTeX
% Conference paper: Schall, Belyaev, Seidel, ACM SPM 2007, pp. 217-222, ACM.
% ADDRESS holds the event venue (Beijing), per this repository's export
% convention. ABSTRACT retains literal <br> markup from the source export.
@inproceedings{Schall-et-al_SPM07, TITLE = {Feature-preserving Non-local Denoising of Static and Time-varying Range Data}, AUTHOR = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-59593-666-0}, DOI = {10.1145/1236246.1236277}, LOCALID = {Local-ID: C12573CC004A8E26-A3335C721A80A788C1257298003CDDAD-spm07sbs}, PUBLISHER = {ACM}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {We present a new method for noise removal on static and time-varying range <br>data. Our approach predicts the restored position of a perturbed vertex using <br>similar vertices in its neighborhood. It defines the required similarity <br>measure in a new non-local fashion which compares regions of the surface <br>instead of point pairs. This allows our algorithm to obtain a more accurate <br>denoising result than previous state-of-the-art approaches and, at the same <br>time, to better preserve fine features of the surface. Furthermore, our <br>approach is easy to implement, effective, and flexibly applicable to different <br>types of scanned data. We demonstrate this on several static and interesting <br>new time-varying datasets obtained using laser and structured light scanners.}, BOOKTITLE = {SPM '07: Proceedings of the 2007 ACM symposium on Solid and physical modeling}, EDITOR = {Spencer, Stephen}, PAGES = {217--222}, ADDRESS = {Beijing, China}, }
Endnote
%0 Conference Proceedings %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature-preserving Non-local Denoising of Static and Time-varying Range Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F40-7 %F EDOC: 356594 %R 10.1145/1236246.1236277 %F OTHER: Local-ID: C12573CC004A8E26-A3335C721A80A788C1257298003CDDAD-spm07sbs %D 2007 %B 2007 ACM Symposium on Solid and Physical Modeling %Z date of event: 2007-06-04 - 2007-06-06 %C Beijing, China %X We present a new method for noise removal on static and time-varying range <br>data. Our approach predicts the restored position of a perturbed vertex using <br>similar vertices in its neighborhood. It defines the required similarity <br>measure in a new non-local fashion which compares regions of the surface <br>instead of point pairs. This allows our algorithm to obtain a more accurate <br>denoising result than previous state-of-the-art approaches and, at the same <br>time, to better preserve fine features of the surface. Furthermore, our <br>approach is easy to implement, effective, and flexibly applicable to different <br>types of scanned data. We demonstrate this on several static and interesting <br>new time-varying datasets obtained using laser and structured light scanners. %B SPM '07: Proceedings of the 2007 ACM symposium on Solid and physical modeling %E Spencer, Stephen %P 217 - 222 %I ACM %@ 978-1-59593-666-0
Schall, O., Belyaev, A., and Seidel, H.-P. 2007b. Error-guided Adaptive Fourier-based Surface Reconstruction. Computer-Aided Design39, 5.
Abstract
In this paper, we propose to combine Kazhdan's FFT-based approach to surface <br>reconstruction from oriented points with adaptive subdivision and partition of <br>unity blending techniques. This removes the main drawback of the FFT-based <br>approach which is a high memory consumption for geometrically complex datasets. <br>This allows us to achieve a higher reconstruction accuracy compared with the <br>original global approach. Furthermore, our reconstruction process is guided by <br>a global error control accomplished by computing the Hausdorff distance of <br>selected input samples to intermediate reconstructions. The advantages of our <br>surface reconstruction method include also a more robust surface restoration in <br>regions where the surface folds back to itself.
Export
BibTeX
% Journal article: Schall, Belyaev, Seidel, Computer-Aided Design 39(5):421-426,
% 2007. ABSTRACT retains literal <br> markup from the source export — intentional.
@article{Schall-et-al_CAD07, TITLE = {Error-guided Adaptive Fourier-based Surface Reconstruction}, AUTHOR = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0010-4485}, DOI = {10.1016/j.cad.2007.02.005}, LOCALID = {Local-ID: C12573CC004A8E26-50E2FFBC7E84035FC1257298004821AC-cad07sbs}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {In this paper, we propose to combine Kazhdan's FFT-based approach to surface <br>reconstruction from oriented points with adaptive subdivision and partition of <br>unity blending techniques. This removes the main drawback of the FFT-based <br>approach which is a high memory consumption for geometrically complex datasets. <br>This allows us to achieve a higher reconstruction accuracy compared with the <br>original global approach. Furthermore, our reconstruction process is guided by <br>a global error control accomplished by computing the Hausdorff distance of <br>selected input samples to intermediate reconstructions. The advantages of our <br>surface reconstruction method include also a more robust surface restoration in <br>regions where the surface folds back to itself.}, JOURNAL = {Computer-Aided Design}, VOLUME = {39}, NUMBER = {5}, PAGES = {421--426}, }
Endnote
%0 Journal Article %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Error-guided Adaptive Fourier-based Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F14-9 %F EDOC: 356599 %R 10.1016/j.cad.2007.02.005 %F OTHER: Local-ID: C12573CC004A8E26-50E2FFBC7E84035FC1257298004821AC-cad07sbs %D 2007 %* Review method: peer-reviewed %X In this paper, we propose to combine Kazhdan's FFT-based approach to surface <br>reconstruction from oriented points with adaptive subdivision and partition of <br>unity blending techniques. This removes the main drawback of the FFT-based <br>approach which is a high memory consumption for geometrically complex datasets. <br>This allows us to achieve a higher reconstruction accuracy compared with the <br>original global approach. Furthermore, our reconstruction process is guided by <br>a global error control accomplished by computing the Hausdorff distance of <br>selected input samples to intermediate reconstructions. The advantages of our <br>surface reconstruction method include also a more robust surface restoration in <br>regions where the surface folds back to itself. %J Computer-Aided Design %V 39 %N 5 %& 421 %P 421 - 426 %@ false
Saleem, W., Schall, O., Patanè, G., Belyaev, A., and Seidel, H.-P. 2007a. On Stochastic Methods for Surface Reconstruction. The Visual Computer23, 6.
Abstract
In this article, we present and discuss three statistical methods for Surface <br>Reconstruction. A typical input to a Surface Reconstruction technique consists <br>of a large set of points that has been sampled from a smooth surface and <br>contains uncertain data in the form of noise and outliers. We first present a <br>method that filters out uncertain and redundant information yielding a more <br>accurate and economical surface representation. Then we present two methods, <br>each of which converts the input point data to a standard shape representation; <br>the first produces an implicit representation while the second yields a <br>triangle mesh.
Export
BibTeX
% Journal article: Saleem, Schall, Patanè, Belyaev, Seidel,
% The Visual Computer 23(6):381-395, 2007, Springer.
% ABSTRACT retains literal <br> markup from the source export — intentional.
@article{Saleem-et-al_Vis.Comp.07, TITLE = {On Stochastic Methods for Surface Reconstruction}, AUTHOR = {Saleem, Waqar and Schall, Oliver and Patan{\`e}, Giuseppe and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0178-2789}, DOI = {10.1007/s00371-006-0094-3}, LOCALID = {Local-ID: C12573CC004A8E26-9E5BBC343549D4F3C125729800496715-tvc07sspbs}, PUBLISHER = {Springer International}, ADDRESS = {Berlin}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {In this article, we present and discuss three statistical methods for Surface <br>Reconstruction. A typical input to a Surface Reconstruction technique consists <br>of a large set of points that has been sampled from a smooth surface and <br>contains uncertain data in the form of noise and outliers. We first present a <br>method that filters out uncertain and redundant information yielding a more <br>accurate and economical surface representation. Then we present two methods, <br>each of which converts the input point data to a standard shape representation; <br>the first produces an implicit representation while the second yields a <br>triangle mesh.}, JOURNAL = {The Visual Computer}, VOLUME = {23}, NUMBER = {6}, PAGES = {381--395}, }
Endnote
%0 Journal Article %A Saleem, Waqar %A Schall, Oliver %A Patan&#232;, Giuseppe %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Stochastic Methods for Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-201B-5 %F EDOC: 356596 %R 10.1007/s00371-006-0094-3 %F OTHER: Local-ID: C12573CC004A8E26-9E5BBC343549D4F3C125729800496715-tvc07sspbs %D 2007 %* Review method: peer-reviewed %X In this article, we present and discuss three statistical methods for Surface <br>Reconstruction. A typical input to a Surface Reconstruction technique consists <br>of a large set of points that has been sampled from a smooth surface and <br>contains uncertain data in the form of noise and outliers. We first present a <br>method that filters out uncertain and redundant information yielding a more <br>accurate and economical surface representation. Then we present two methods, <br>each of which converts the input point data to a standard shape representation; <br>the first produces an implicit representation while the second yields a <br>triangle mesh. %J The Visual Computer %V 23 %N 6 %& 381 %P 381 - 395 %I Springer International %C Berlin %@ false %U https://rdcu.be/dINpQ
Saleem, W., Song, W., Belyaev, A., and Seidel, H.-P. 2007b. On Computing Best Fly. SCCG ’07: Proceedings of the 23rd Spring Conference on Computer Graphics, ACM.
Abstract
With growing popularity of online 3D shape databases, the problem of navigation <br>and remote visualisation of large 3D shape models in such repositories is <br>gaining prominence. While some recent work has focused on automatically <br>computing the best view(s) of a given model, little attention has been given to <br>the problem's dynamic counterpart - best fly. In this paper, we propose a <br>solution to this problem that extends on previous best view methods. Given a <br>shape, we use its best views to compute a path on its viewsphere which acts as <br>a trajectory for a virtual camera pointing at the object. We then use the <br>model's geometric properties to determine the speed and zoom of the camera <br>along the path.
Export
BibTeX
@inproceedings{Saleem-et-al_SCCG07,
  title     = {On Computing Best Fly},
  author    = {Saleem, Waqar and Song, Wenhao and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-80-223-2292-8},
  doi       = {10.1145/2614348.2614365},
  localid   = {Local-ID: C12573CC004A8E26-3C0449683A7A0677C12573C400403B0B-ssbs_sccg07},
  publisher = {ACM},
  year      = {2007},
  date      = {2007},
  abstract  = {With growing popularity of online 3D shape databases, the problem of navigation <br>and remote visualisation of large 3D shape models in such repositories is <br>gaining prominence. While some recent work has focused on automatically <br>computing the best view(s) of a given model, little attention has been given to <br>the problem's dynamic counterpart -- best fly. In this paper, we propose a <br>solution to this problem that extends on previous best view methods. Given a <br>shape, we use its best views to compute a path on its viewsphere which acts as <br>a trajectory for a virtual camera pointing at the object. We then use the <br>model's geometric properties to determine the speed and zoom of the camera <br>along the path.},
  booktitle = {SCCG '07: Proceedings of the 23rd Spring Conference on Computer Graphics},
  editor    = {Sbert, Mateu},
  pages     = {115--121},
  address   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Saleem, Waqar %A Song, Wenhao %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Computing Best Fly : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-200C-7 %F EDOC: 356549 %F OTHER: Local-ID: C12573CC004A8E26-3C0449683A7A0677C12573C400403B0B-ssbs_sccg07 %R 10.1145/2614348.2614365 %D 2007 %B 23rd Spring Conference on Computer Graphics %Z date of event: 2007-04-26 - 2007-04-28 %C Budmerice, Slovakia %X With growing popularity of online 3D shape databases, the problem of navigation <br>and remote visualisation of large 3D shape models in such repositories is <br>gaining prominence. While some recent work has focused on automatically <br>computing the best view(s) of a given model, little attention has been given to <br>the problem's dynamic counterpart - best fly. In this paper, we propose a <br>solution to this problem that extends on previous best view methods. Given a <br>shape, we use its best views to compute a path on its viewsphere which acts as <br>a trajectory for a virtual camera pointing at the object. We then use the <br>model's geometric properties to determine the speed and zoom of the camera <br>along the path. %B SCCG '07: Proceedings of the 23rd Spring Conference on Computer Graphics %E Sbert, Mateu %P 115 - 121 %I ACM %@ 978-80-223-2292-8
Saleem, W., Wang, D., Belyaev, A., and Seidel, H.-P. 2007c. Automatic 2D Shape Orientation by Example. IEEE International Conference on Shape Modeling and Applications 2007 (SMI 2007), IEEE Computer Society.
Abstract
As large shape repositories become more common, the problem of automatically <br>generating good views of shapes has recently gained prominence. However, very <br>few of the proposed methods take into account the orientation of the shape in <br>the resulting view, and none presents a satisfactory solution. In this paper, <br>we present a simple, example based method to correct the orientation of a shape <br>in a query image. Our method depends on the availability of a database of <br>classified images containing correctly oriented shapes. In the first step, a <br>candidate class for the query shape is identified, and in the second, the query <br>shape is aligned with a target shape from the candidate class.
Export
BibTeX
@inproceedings{Saleem-et-al_SMI07,
  title     = {Automatic {2D} Shape Orientation by Example},
  author    = {Saleem, Waqar and Wang, Danyi and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-2815-5},
  doi       = {10.1109/SMI.2007.8},
  localid   = {Local-ID: C12573CC004A8E26-7B832348893E9D47C125730E00542D4F-SaleemWangBelyaevSeidel2007},
  publisher = {IEEE Computer Society},
  year      = {2007},
  date      = {2007},
  abstract  = {As large shape repositories become more common, the problem of automatically <br>generating good views of shapes has recently gained prominence. However, very <br>few of the proposed methods take into account the orientation of the shape in <br>the resulting view, and none presents a satisfactory solution. In this paper, <br>we present a simple, example based method to correct the orientation of a shape <br>in a query image. Our method depends on the availability of a database of <br>classified images containing correctly oriented shapes. In the first step, a <br>candidate class for the query shape is identified, and in the second, the query <br>shape is aligned with a target shape from the candidate class.},
  booktitle = {IEEE International Conference on Shape Modeling and Applications 2007 (SMI 2007)},
  pages     = {221--225},
  address   = {Lyon, France},
}
Endnote
%0 Conference Proceedings %A Saleem, Waqar %A Wang, Danyi %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic 2D Shape Orientation by Example : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E5B-6 %F EDOC: 356538 %R 10.1109/SMI.2007.8 %F OTHER: Local-ID: C12573CC004A8E26-7B832348893E9D47C125730E00542D4F-SaleemWangBelyaevSeidel2007 %D 2007 %B IEEE International Conference on Shape Modeling and Applications 2007 %Z date of event: 2007-06-13 - 2007-06-15 %C Lyon, France %X As large shape repositories become more common, the problem of automatically <br>generating good views of shapes has recently gained prominence. However, very <br>few of the proposed methods take into account the orientation of the shape in <br>the resulting view, and none presents a satisfactory solution. In this paper, <br>we present a simple, example based method to correct the orientation of a shape <br>in a query image. Our method depends on the availability of a database of <br>classified images containing correctly oriented shapes. In the first step, a <br>candidate class for the query shape is identified, and in the second, the query <br>shape is aligned with a target shape from the candidate class. %B IEEE International Conference on Shape Modeling and Applications 2007 %P 221 - 225 %I IEEE Computer Society %@ 0-7695-2815-5
Rosenhahn, B., Kersting, U., Powell, K., Klette, R., Klette, G., and Seidel, H.-P. 2007a. A System for Articulated Tracking Incorporating a Clothing Model. Machine Vision and Applications 18, 1.
Abstract
In this paper an approach for motion capture of dressed people is presented. A <br>cloth draping method is incorporated in a silhouette based motion capture <br>system. This leads to a simultaneous estimation of pose, joint angles, cloth <br>draping parameters and wind forces. An error functional is formalized to <br>minimize the involved parameters simultaneously. This allows for reconstruction <br>of the underlying kinematic structure, even though it is covered with fabrics. <br>Finally, a quantitative error analysis is performed. Pose results are compared <br>with results obtained from a commercially available marker based tracking <br>system. The deviations have a magnitude of three degrees which indicates a <br>reasonably stable approach.
Export
BibTeX
@article{Rosenhahn-et-al_MVA07,
  title     = {A System for Articulated Tracking Incorporating a Clothing Model},
  author    = {Rosenhahn, Bodo and Kersting, Uwe and Powell, Katie and Klette, Reinhard and Klette, Gisela and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0932-8092},
  doi       = {10.1007/s00138-006-0046-y},
  localid   = {Local-ID: C12573CC004A8E26-C495814AFA18F45DC125722D00531C89-RosenhahnMVA2006},
  publisher = {Springer},
  address   = {Berlin ; Heidelberg},
  year      = {2007},
  date      = {2007},
  abstract  = {In this paper an approach for motion capture of dressed people is presented. A <br>cloth draping method is incorporated in a silhouette based motion capture <br>system. This leads to a simultaneous estimation of pose, joint angles, cloth <br>draping parameters and wind forces. An error functional is formalized to <br>minimize the involved parameters simultaneously. This allows for reconstruction <br>of the underlying kinematic structure, even though it is covered with fabrics. <br>Finally, a quantitative error analysis is performed. Pose results are compared <br>with results obtained from a commercially available marker based tracking <br>system. The deviations have a magnitude of three degrees which indicates a <br>reasonably stable approach.},
  journal   = {Machine Vision and Applications},
  volume    = {18},
  number    = {1},
  pages     = {25--40},
}
Endnote
%0 Journal Article %A Rosenhahn, Bodo %A Kersting, Uwe %A Powell, Katie %A Klette, Reinhard %A Klette, Gisela %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A System for Articulated Tracking Incorporating a Clothing Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E42-B %F EDOC: 356609 %R 10.1007/s00138-006-0046-y %F OTHER: Local-ID: C12573CC004A8E26-C495814AFA18F45DC125722D00531C89-RosenhahnMVA2006 %D 2007 %* Review method: peer-reviewed %X In this paper an approach for motion capture of dressed people is presented. A <br>cloth draping method is incorporated in a silhouette based motion capture <br>system. This leads to a simultaneous estimation of pose, joint angles, cloth <br>draping parameters and wind forces. An error functional is formalized to <br>minimize the involved parameters simultaneously. This allows for reconstruction <br>of the underlying kinematic structure, even though it is covered with fabrics. <br>Finally, a quantitative error analysis is performed. Pose results are compared <br>with results obtained from a commercially available marker based tracking <br>system. The deviations have a magnitude of three degrees which indicates a <br>reasonably stable approach. %J Machine Vision and Applications %O Machine Vis. Apps. %V 18 %N 1 %& 25 %P 25 - 40 %I Springer %C Berlin ; Heidelberg %@ false %U https://rdcu.be/dISpl
Rosenhahn, B., Brox, T., Cremers, D., and Seidel, H.-P. 2007b. Online Smoothing for Markerless Motion Capture. Pattern Recognition, Springer.
Export
BibTeX
@inproceedings{Rosenhahn-et-al_DAGM07,
  title     = {Online Smoothing for Markerless Motion Capture},
  author    = {Rosenhahn, Bodo and Brox, Thomas and Cremers, Daniel and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3540749330},
  doi       = {10.1007/978-3-540-74936-3_17},
  localid   = {Local-ID: C12573CC004A8E26-792349364710601FC12573C40044CBC9-RosenhahnDAGM2007},
  publisher = {Springer},
  year      = {2007},
  date      = {2007},
  booktitle = {Pattern Recognition},
  editor    = {Hamprecht, Fred A. and Schn{\"o}rr, Christoph and J{\"a}hne, Bernd},
  pages     = {163--172},
  series    = {Lecture Notes in Computer Science},
  volume    = {4713},
  address   = {Heidelberg, Germany},
}
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Brox, Thomas %A Cremers, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Online Smoothing for Markerless Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2014-4 %F EDOC: 356515 %F OTHER: Local-ID: C12573CC004A8E26-792349364710601FC12573C40044CBC9-RosenhahnDAGM2007 %R 10.1007/978-3-540-74936-3_17 %D 2007 %B 29th DAGM Symposium on Pattern Recognition %Z date of event: 2007-09-12 - 2007-09-14 %C Heidelberg, Germany %B Pattern Recognition %E Hamprecht, Fred A.; Schn&#246;rr, Christoph; J&#228;hne, Bernd %P 163 - 172 %I Springer %@ 3540749330 %B Lecture Notes in Computer Science %N 4713 %U https://rdcu.be/dIMRl
Rosenhahn, B., Brox, T., and Seidel, H.-P. 2007c. Scaled Motion Dynamics for Markerless Motion Capture. 2007 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2007), IEEE.
Export
BibTeX
@inproceedings{Rosenhahn-et-al_CVPR07,
  title     = {Scaled Motion Dynamics for Markerless Motion Capture},
  author    = {Rosenhahn, Bodo and Brox, Thomas and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {1-4244-1180-7},
  doi       = {10.1109/CVPR.2007.383128},
  localid   = {Local-ID: C12573CC004A8E26-4DEE97241FC2E309C12573C4004A453A-RosenhahnCVPR2007},
  publisher = {IEEE},
  year      = {2007},
  date      = {2007},
  booktitle = {2007 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2007)},
  pages     = {1203--1210},
  address   = {Minneapolis, MN, USA},
}
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Brox, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Scaled Motion Dynamics for Markerless Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2099-7 %F EDOC: 356525 %R 10.1109/CVPR.2007.383128 %F OTHER: Local-ID: C12573CC004A8E26-4DEE97241FC2E309C12573C4004A453A-RosenhahnCVPR2007 %D 2007 %B 2007 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2007-06-17 - 2007-06-22 %C Minneapolis, MN, USA %B 2007 IEEE Conference on Computer Vision and Pattern Recognition %P 1203 - 1210 %I IEEE %@ 1-4244-1180-7
Popov, S., Günther, J., Seidel, H.-P., and Slusallek, P. 2007. Stackless KD-Tree Traversal for High Performance GPU Ray Tracing. Computer Graphics Forum, Blackwell.
Abstract
Significant advances have been achieved for realtime ray<br>tracing recently, but realtime performance for complex<br>scenes still requires large computational resources not yet<br>available from the CPUs in standard PCs. Incidentally, most<br>of these PCs also contain modern GPUs that do offer much<br>larger raw compute power. However, limitations in the<br>programming and memory model have so far kept the<br>performance of GPU ray tracers well below that of their CPU<br>counterparts.<br><br>In this paper we present a novel packet ray traversal<br>implementation that completely eliminates the need for<br>maintaining a stack during kd-tree traversal and that<br>reduces the number of traversal steps per ray. While CPUs<br>benefit moderately from the stackless approach, it improves<br>GPU performance significantly. We achieve a peak performance<br>of over 16 million rays per second for reasonably complex<br>scenes, including complex shading and secondary rays.<br>Several examples show that with this new technique GPUs can<br>actually outperform equivalent CPU based ray tracers.
Export
BibTeX
% Eurographics 2007 paper, published as Computer Graphics Forum 26(3).
% Fixes: removed the duplicated PUBLISHER field (BibTeX warns on a repeated
% field and silently keeps only the first value, so {Blackwell} is retained);
% renamed ISSUE -> NUMBER, since classic BibTeX styles ignore the non-standard
% ISSUE field and the issue number would not be printed.
@inproceedings{Popov-et-al_Eurographics07,
  title     = {Stackless {KD}-Tree Traversal for High Performance {GPU} Ray Tracing},
  author    = {Popov, Stefan and G{\"u}nther, Johannes and Seidel, Hans-Peter and Slusallek, Philipp},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2007.01064.x},
  localid   = {Local-ID: C12573CC004A8E26-75606E8C6B6DADCDC12572F8003074FE-popov:07:GPURT},
  publisher = {Blackwell},
  year      = {2007},
  date      = {2007},
  abstract  = {Significant advances have been achieved for realtime ray<br>tracing recently, but realtime performance for complex<br>scenes still requires large computational resources not yet<br>available from the CPUs in standard PCs. Incidentally, most<br>of these PCs also contain modern GPUs that do offer much<br>larger raw compute power. However, limitations in the<br>programming and memory model have so far kept the<br>performance of GPU ray tracers well below that of their CPU<br>counterparts.<br><br>In this paper we present a novel packet ray traversal<br>implementation that completely eliminates the need for<br>maintaining a stack during kd-tree traversal and that<br>reduces the number of traversal steps per ray. While CPUs<br>benefit moderately from the stackless approach, it improves<br>GPU performance significantly. We achieve a peak performance<br>of over 16 million rays per second for reasonably complex<br>scenes, including complex shading and secondary rays.<br>Several examples show that with this new technique GPUs can<br>actually outperform equivalent CPU based ray tracers.},
  booktitle = {Eurographics 2007},
  editor    = {Cohen-Or, Daniel and Slavik, Pavel},
  pages     = {415--424},
  journal   = {Computer Graphics Forum},
  volume    = {26},
  number    = {3},
  address   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Popov, Stefan %A G&#252;nther, Johannes %A Seidel, Hans-Peter %A Slusallek, Philipp %+ International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Stackless KD-Tree Traversal for High Performance GPU Ray Tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20C6-3 %F EDOC: 356545 %F OTHER: Local-ID: C12573CC004A8E26-75606E8C6B6DADCDC12572F8003074FE-popov:07:GPURT %R 10.1111/j.1467-8659.2007.01064.x %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X Significant advances have been achieved for realtime ray<br>tracing recently, but realtime performance for complex<br>scenes still requires large computational resources not yet<br>available from the CPUs in standard PCs. Incidentally, most<br>of these PCs also contain modern GPUs that do offer much<br>larger raw compute power. However, limitations in the<br>programming and memory model have so far kept the<br>performance of GPU ray tracers well below that of their CPU<br>counterparts.<br><br>In this paper we present a novel packet ray traversal<br>implementation that completely eliminates the need for<br>maintaining a stack during kd-tree traversal and that<br>reduces the number of traversal steps per ray. While CPUs<br>benefit moderately from the stackless approach, it improves<br>GPU performance significantly. We achieve a peak performance<br>of over 16 million rays per second for reasonably complex<br>scenes, including complex shading and secondary rays.<br>Several examples show that with this new technique GPUs can<br>actually outperform equivalent CPU based ray tracers. %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 415 - 424 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Neff, M., Albrecht, I., and Seidel, H.-P. 2007. Layered Performance Animation with Correlation Maps. Computer Graphics Forum, Blackwell.
Export
BibTeX
% Eurographics 2007 paper, published as Computer Graphics Forum 26(3).
% Fixes: removed the duplicated PUBLISHER field (BibTeX warns on a repeated
% field and silently keeps only the first value, so {Blackwell} is retained);
% renamed ISSUE -> NUMBER, since classic BibTeX styles ignore the non-standard
% ISSUE field and the issue number would not be printed.
@inproceedings{Neff-et-al_Eurographics07,
  title     = {Layered Performance Animation with Correlation Maps},
  author    = {Neff, Michael and Albrecht, Irene and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2007.01091.x},
  localid   = {Local-ID: C12573CC004A8E26-CF4386D097AA69E6C12573EF0040A85F-Neff2007z},
  publisher = {Blackwell},
  year      = {2007},
  date      = {2007},
  booktitle = {Eurographics 2007},
  editor    = {Cohen-Or, Daniel and Slavik, Pavel},
  pages     = {675--684},
  journal   = {Computer Graphics Forum},
  volume    = {26},
  number    = {3},
  address   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Neff, Michael %A Albrecht, Irene %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Performance Animation with Correlation Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FAB-8 %F EDOC: 356491 %F OTHER: Local-ID: C12573CC004A8E26-CF4386D097AA69E6C12573EF0040A85F-Neff2007z %R 10.1111/j.1467-8659.2007.01091.x %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 675 - 684 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Mantiuk, R., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2007a. High Dynamic Range Image and Video Compression - Fidelity Matching Human Visual Performance. 2007 IEEE International Conference on Image Processing (ICIP 2007), IEEE.
Abstract
Vast majority of digital images and video material stored today can capture <br>only a fraction of visual information visible to the human eye and does not <br>offer sufficient quality to fully exploit capabilities of new display devices. <br>High dynamic range (HDR) image and video formats encode the full visible range <br>of luminance and color gamut, thus offering ultimate fidelity, limited only by <br>the capabilities of the human eye and not by any existing technology. In this<br> paper we demonstrate how existing image and video compression standards can be <br>extended to encode HDR content efficiently. This is achieved by a custom color <br>space for encoding HDR pixel values that is derived from the visual performance <br>data. We also demonstrate how HDR image and video compression can be designed <br>so that it is backward compatible with existing formats.
Export
BibTeX
@inproceedings{Mantiuk-et-al_ICIP07,
  title     = {High Dynamic Range Image and Video Compression -- Fidelity Matching Human Visual Performance},
  author    = {Mantiuk, Rafa{\l} and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4244-1437-6},
  doi       = {10.1109/ICIP.2007.4378878},
  localid   = {Local-ID: C12573CC004A8E26-8908FB59F4C64796C125739F003CC9EF-Mantiuk2007hdrivc},
  publisher = {IEEE},
  year      = {2007},
  date      = {2007},
  abstract  = {Vast majority of digital images and video material stored today can capture <br>only a fraction of visual information visible to the human eye and does not <br>offer sufficient quality to fully exploit capabilities of new display devices. <br>High dynamic range (HDR) image and video formats encode the full visible range <br>of luminance and color gamut, thus offering ultimate fidelity, limited only by <br>the capabilities of the human eye and not by any existing technology. In this<br> paper we demonstrate how existing image and video compression standards can be <br>extended to encode HDR content efficiently. This is achieved by a custom color <br>space for encoding HDR pixel values that is derived from the visual performance <br>data. We also demonstrate how HDR image and video compression can be designed <br>so that it is backward compatible with existing formats.},
  booktitle = {2007 IEEE International Conference on Image Processing (ICIP 2007)},
  pages     = {9--12},
  address   = {San Antonio, TX, USA},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafa&#322; %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Image and Video Compression - Fidelity Matching Human Visual Performance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F68-F %F EDOC: 356576 %R 10.1109/ICIP.2007.4378878 %F OTHER: Local-ID: C12573CC004A8E26-8908FB59F4C64796C125739F003CC9EF-Mantiuk2007hdrivc %D 2007 %B 2007 IEEE International Conference on Image Processing %Z date of event: 2007-09-16 - 2007-09-19 %C San Antonio, TX, USA %X Vast majority of digital images and video material stored today can capture <br>only a fraction of visual information visible to the human eye and does not <br>offer sufficient quality to fully exploit capabilities of new display devices. <br>High dynamic range (HDR) image and video formats encode the full visible range <br>of luminance and color gamut, thus offering ultimate fidelity, limited only by <br>the capabilities of the human eye and not by any existing technology. In this<br> paper we demonstrate how existing image and video compression standards can be <br>extended to encode HDR content efficiently. This is achieved by a custom color <br>space for encoding HDR pixel values that is derived from the visual performance <br>data. We also demonstrate how HDR image and video compression can be designed <br>so that it is backward compatible with existing formats. %B 2007 IEEE International Conference on Image Processing %P 9 - 12 %I IEEE %@ 978-1-4244-1437-6
Mantiuk, R., Krawczyk, G., Mantiuk, R., and Seidel, H.-P. 2007b. High Dynamic Range Imaging Pipeline: Perception-motivated Representation of Visual Content. Human Vision and Electronic Imaging XII, SPIE.
Abstract
The advances in high dynamic range (HDR) imaging, especially in the<br>display and camera technology, have a significant impact on the<br>existing imaging systems. The assumptions of the traditional<br>low-dynamic range imaging, designed for paper print as a major output<br>medium, are ill suited for the range of visual material that is shown<br>on modern displays. For example, the common assumption that the<br>brightest color in an image is white can be hardly justified for high<br>contrast LCD displays, not to mention next generation HDR displays,<br>that can easily create bright highlights and the impression of<br>self-luminous colors. We argue that high dynamic range representation<br>can encode images regardless of the technology used to create and<br>display them, with the accuracy that is only constrained by the<br>limitations of the human eye and not a particular output medium. To<br>facilitate the research on high dynamic range imaging, we have created<br>a software package (http://pfstools.sourceforge.net/), capable of<br>handling HDR data on all stages of image and video processing. The<br>software package is available as open source under the General Public<br>License and includes solutions for high quality image acquisition from<br>multiple exposures, a range of tone mapping algorithms and a visual<br>difference predictor for HDR images. We demonstrate how particular<br>elements of the imaging pipeline can be interfaced using standard<br>features of the operating system. Examples of shell scripts<br>demonstrate how the software can be used for processing single images<br>as well as video sequences.
Export
BibTeX
% SPIE Human Vision and Electronic Imaging XII paper.
% Fixes: the entry had been broken across two lines mid-abstract by the export
% (rejoined here so the braces balance within one entry); removed the
% duplicated EDITOR field (BibTeX warns on a repeated field and keeps only the
% first value, which is retained unchanged).
@inproceedings{Mantiuk-et-al_HVEI,
  title     = {High Dynamic Range Imaging Pipeline: Perception-motivated Representation of Visual Content},
  author    = {Mantiuk, Rafa{\l} and Krawczyk, Grzegorz and Mantiuk, Radoslaw and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {9780819466051},
  doi       = {10.1117/12.713526},
  localid   = {Local-ID: C12573CC004A8E26-1391CD1854A6F5ADC125722F0039893D-Mantiuk2007spie},
  publisher = {SPIE},
  year      = {2007},
  date      = {2007},
  abstract  = {The advances in high dynamic range (HDR) imaging, especially in the<br>display and camera technology, have a significant impact on the<br>existing imaging systems. The assumptions of the traditional<br>low-dynamic range imaging, designed for paper print as a major output<br>medium, are ill suited for the range of visual material that is shown<br>on modern displays. For example, the common assumption that the<br>brightest color in an image is white can be hardly justified for high<br>contrast LCD displays, not to mention next generation HDR displays,<br>that can easily create bright highlights and the impression of<br>self-luminous colors. We argue that high dynamic range representation<br>can encode images regardless of the technology used to create and<br>display them, with the accuracy that is only constrained by the<br>limitations of the human eye and not a particular output medium. To<br>facilitate the research on high dynamic range imaging, we have created<br>a software package (http://pfstools.sourceforge.net/), capable of<br>handling HDR data on all stages of image and video processing. The<br>software package is available as open source under the General Public<br>License and includes solutions for high quality image acquisition from<br>multiple exposures, a range of tone mapping algorithms and a visual<br>difference predictor for HDR images. We demonstrate how particular<br>elements of the imaging pipeline can be interfaced using standard<br>features of the operating system. Examples of shell scripts<br>demonstrate how the software can be used for processing single images<br>as well as video sequences.},
  booktitle = {Human Vision and Electronic Imaging XII},
  editor    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.},
  pages     = {649212.1--12},
  series    = {SPIE},
  volume    = {6492},
  address   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafa&#322; %A Krawczyk, Grzegorz %A Mantiuk, Radoslaw %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging Pipeline: Perception-motivated Representation of Visual Content : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F6A-B %F EDOC: 356611 %R 10.1117/12.713526 %F OTHER: Local-ID: C12573CC004A8E26-1391CD1854A6F5ADC125722F0039893D-Mantiuk2007spie %D 2007 %B Human Vision and Electronic Imaging XII %Z date of event: 2007-01-29 - 2007-02-01 %C San Jose, CA, USA %X The advances in high dynamic range (HDR) imaging, especially in the<br>display and camera technology, have a significant impact on the<br>existing imaging systems. The assumptions of the traditional<br>low-dynamic range imaging, designed for paper print as a major output<br>medium, are ill suited for the range of visual material that is shown<br>on modern displays. For example, the common assumption that the<br>brightest color in an image is white can be hardly justified for high<br>contrast LCD displays, not to mention next generation HDR displays,<br>that can easily create bright highlights and the impression of<br>self-luminous colors. We argue that high dynamic range representation<br>can encode images regardless of the technology used to create and<br>display them, with the accuracy that is only constrained by the<br>limitations of the human eye and not a particular output medium. To<br>facilitate the research on high dynamic range imaging, we have created<br>a software package (http://pfstools.sourceforge.net/), capable of<br>handling HDR data on all stages of image and video processing. 
The<br>software package is available as open source under the General Public<br>License and includes solutions for high quality image acquisition from<br>multiple exposures, a range of tone mapping algorithms and a visual<br>difference predictor for HDR images. We demonstrate how particular<br>elements of the imaging pipeline can be interfaced using standard<br>features of the operating system. Examples of shell scripts<br>demonstrate how the software can be used for processing single images<br>as well as video sequences. %B Human Vision and Electronic Imaging XII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 649212.1 - 12 %I SPIE %@ 9780819466051 %B SPIE %Y Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %N 6492
Lintu, A., Hoffmann, L., Magnor, M., Lensch, H.P.A., and Seidel, H.-P. 2007a. 3D Reconstruction of Reflection Nebulae from a Single Image. Vision, Modeling, and Visualization 2007 : Proceedings, Max-Planck-Institut für Informatik.
Abstract
This paper presents a method for reconstructing the<br>3D distribution of dust densities in reflection nebulae<br>based on a single input image using an analysisby-<br>synthesis approach. In a reflection nebula, light<br>is typically emitted from a central star and then scattered<br>and partially absorbed by the nebula’s dust<br>particles. We model the light transport in this kind<br>of nebulae by considering absorption and single<br>scattering only. While the core problem of reconstructing<br>an arbitrary 3D volume of dust particles<br>from a 2D image would be ill-posed we demonstrate<br>how the special configuration of light transport<br>paths in reflection nebulae allows us to produce<br>non-exact but plausible 3D volumes. Our reconstruction<br>is driven by an iterative non-linear optimization<br>method, which renders an image in each<br>step with the current estimate of dust densities and<br>then updates the density values to minimize the error<br>to the input image. The recovered volumetric<br>datasets can be used in astrophysical research as<br>well as planetarium visualizations.
Export
BibTeX
@inproceedings{Lintu-et-al_VMV07,
  TITLE     = {{3D} Reconstruction of Reflection Nebulae from a Single Image},
  AUTHOR    = {Lintu, Andrei and Hoffmann, Lars and Magnor, Marcus and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-940739-00-1},
  LOCALID   = {Local-ID: C12573CC004A8E26-6A5B8EDF63DE6335C12573C90036CE36-Lintu:2007:RNS},
  PUBLISHER = {Max-Planck-Institut f{\"u}r Informatik},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {This paper presents a method for reconstructing the 3D distribution of dust densities in reflection nebulae based on a single input image using an analysis-by-synthesis approach. In a reflection nebula, light is typically emitted from a central star and then scattered and partially absorbed by the nebula's dust particles. We model the light transport in this kind of nebulae by considering absorption and single scattering only. While the core problem of reconstructing an arbitrary 3D volume of dust particles from a 2D image would be ill-posed we demonstrate how the special configuration of light transport paths in reflection nebulae allows us to produce non-exact but plausible 3D volumes. Our reconstruction is driven by an iterative non-linear optimization method, which renders an image in each step with the current estimate of dust densities and then updates the density values to minimize the error to the input image. The recovered volumetric datasets can be used in astrophysical research as well as planetarium visualizations.},
  BOOKTITLE = {Vision, Modeling, and Visualization 2007 : Proceedings},
  EDITOR    = {Lensch, Hendrik P. A. and Rosenhahn, Bodo and Seidel, Hans-Peter and Slusallek, Philipp and Weickert, Joachim},
  PAGES     = {109--116},
  ADDRESS   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Lintu, Andrei %A Hoffmann, Lars %A Magnor, Marcus %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Reconstruction of Reflection Nebulae from a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1DCE-C %F EDOC: 356531 %F OTHER: Local-ID: C12573CC004A8E26-6A5B8EDF63DE6335C12573C90036CE36-Lintu:2007:RNS %D 2007 %B 12th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2007-11-07 - 2007-11-09 %C Saarbr&#252;cken, Germany %X This paper presents a method for reconstructing the<br>3D distribution of dust densities in reflection nebulae<br>based on a single input image using an analysisby-<br>synthesis approach. In a reflection nebula, light<br>is typically emitted from a central star and then scattered<br>and partially absorbed by the nebula&#8217;s dust<br>particles. We model the light transport in this kind<br>of nebulae by considering absorption and single<br>scattering only. While the core problem of reconstructing<br>an arbitrary 3D volume of dust particles<br>from a 2D image would be ill-posed we demonstrate<br>how the special configuration of light transport<br>paths in reflection nebulae allows us to produce<br>non-exact but plausible 3D volumes. Our reconstruction<br>is driven by an iterative non-linear optimization<br>method, which renders an image in each<br>step with the current estimate of dust densities and<br>then updates the density values to minimize the error<br>to the input image. The recovered volumetric<br>datasets can be used in astrophysical research as<br>well as planetarium visualizations. 
%B Vision, Modeling, and Visualization 2007 : Proceedings %E Lensch, Hendrik P. A.; Rosenhahn, Bodo; Seidel, Hans-Peter; Slusallek, Philipp; Weickert, Joachim %P 109 - 116 %I Max-Planck-Institut f&#252;r Informatik %@ 978-3-940739-00-1
Lintu, A., Lensch, H.P.A., Magnor, M., El-Abed, S., and Seidel, H.-P. 2007b. 3D Reconstruction of Emission and Absorption in Planetary Nebulae. VG07: Eurographics/IEEE VGTC Symposium on Volume Graphics 2007, Eurographics Association.
Abstract
This paper addresses the problem of reconstructing the 3D structure of planetary nebulae from 2D observations. Assuming axial symmetry, our method jointly reconstructs the distribution of dust and ionized gas in the nebulae from observations at two different wavelengths. In an inverse rendering framework we optimize for the emission and absorption densities which are correlated to the gas and dust distribution present in the nebulae. First, the density distribution of the dust component is estimated based on an infrared image, which traces only the dust distribution due to its intrinsic temperature. In a second step, we optimize for the gas distribution by comparing the rendering of the nebula to the visible wavelength image. During this step, besides the emission of the ionized gas, we further include the effect of absorption and scattering due to the already estimated dust distribution. Using the same approach, we can as well start with a radio image from which the gas distribution is derived without absorption, then deriving the dust distribution from the visible wavelength image considering absorption and scattering. The intermediate steps and the final reconstruction results are visualized at real-time frame rates using a volume renderer. Using our method we recover both gas and dust density distributions present in the nebula by exploiting the distinct absorption or emission parameters at different wavelengths.
Export
BibTeX
@inproceedings{Lintu-et-al_VG07,
  TITLE     = {{3D} Reconstruction of Emission and Absorption in Planetary Nebulae},
  AUTHOR    = {Lintu, Andrei and Lensch, Hendrik P. A. and Magnor, Marcus and El-Abed, Sasha and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-367-7},
  DOI       = {10.2312/VG/VG07/009-016},
  LOCALID   = {Local-ID: C12573CC004A8E26-8973100E612AA1D7C12573C9003C026F-Lintu:2007:REA},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {This paper addresses the problem of reconstructing the 3D structure of planetary nebulae from 2D observations. Assuming axial symmetry, our method jointly reconstructs the distribution of dust and ionized gas in the nebulae from observations at two different wavelengths. In an inverse rendering framework we optimize for the emission and absorption densities which are correlated to the gas and dust distribution present in the nebulae. First, the density distribution of the dust component is estimated based on an infrared image, which traces only the dust distribution due to its intrinsic temperature. In a second step, we optimize for the gas distribution by comparing the rendering of the nebula to the visible wavelength image. During this step, besides the emission of the ionized gas, we further include the effect of absorption and scattering due to the already estimated dust distribution. Using the same approach, we can as well start with a radio image from which the gas distribution is derived without absorption, then deriving the dust distribution from the visible wavelength image considering absorption and scattering. The intermediate steps and the final reconstruction results are visualized at real-time frame rates using a volume renderer. Using our method we recover both gas and dust density distributions present in the nebula by exploiting the distinct absorption or emission parameters at different wavelengths.},
  BOOKTITLE = {VG07: Eurographics/IEEE VGTC Symposium on Volume Graphics 2007},
  EDITOR    = {Fellner, Dieter and M{\"o}ller, Torsten and Fraser, Simon},
  PAGES     = {9--16},
  ADDRESS   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Lintu, Andrei %A Lensch, Hendrik P. A. %A Magnor, Marcus %A El-Abed, Sasha %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Reconstruction of Emission and Absorption in Planetary Nebulae : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1DCB-1 %F EDOC: 356503 %R 10.2312/VG/VG07/009-016 %F OTHER: Local-ID: C12573CC004A8E26-8973100E612AA1D7C12573C9003C026F-Lintu:2007:REA %D 2007 %B VG07: Eurographics/IEEE VGTC Symposium on Volume Graphics 2007 %Z date of event: 2007-09-03 - 2007-09-04 %C Prague, Czech Republic %X This paper addresses the problem of reconstructing the 3D structure of <br>planetary nebulae from 2D observations. Assuming axial symmetry, our method <br>jointly reconstructs the distribution of dust and ionized gas in the nebulae <br>from observations at two different wavelengths. In an inverse rendering <br>framework we optimize for the emission and absorption densities which are <br>correlated to the gas and dust distribution present in the nebulae. First, the <br>density distribution of the dust component is estimated based on an infrared <br>image, which traces only the dust distribution due to its intrinsic <br>temperature. In a second step, we optimize for the gas distribution by <br>comparing the rendering of the nebula to the visible wavelength image. During <br>this step, besides the emission of the ionized gas, we further include the <br>effect of absorption and scattering due to the already estimated dust <br>distribution. 
Using the same approach, we can as well start with a radio image <br>from which the gas distribution is derived without absorption, then deriving <br>the dust distribution from the visible wavelength image considering absorption <br>and scattering. The intermediate steps and the final reconstruction results are <br>visualized at real-time frame rates using a volume renderer. Using our method <br>we recover both gas and dust density distributions present in the nebula by <br>exploiting the distinct absorption or emission parameters at different <br>wavelengths. %B VG07: Eurographics/IEEE VGTC Symposium on Volume Graphics 2007 %E Fellner, Dieter; M&#246;ller, Torsten; Fraser, Simon %P 9 - 16 %I Eurographics Association %@ 978-1-56881-367-7
Lensch, H.P.A., Rosenhahn, B., Seidel, H.-P., Slusallek, P., and Weickert, J., eds. 2007a. 12th International Fall Workshop on Vision, Modeling, and Visualization, VMV 2007. Aka GmbH.
Export
BibTeX
@proceedings{DBLP:conf/vmv/2007,
  TITLE     = {12th International Fall Workshop on Vision, Modeling, and Visualization, VMV 2007},
  EDITOR    = {Lensch, Hendrik P. A. and Rosenhahn, Bodo and Seidel, Hans-Peter and Slusallek, Philipp and Weickert, Joachim},
  LANGUAGE  = {eng},
  ISBN      = {978-3-89838-085-0},
  PUBLISHER = {Aka GmbH},
  YEAR      = {2007},
  DATE      = {2007},
  ADDRESS   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %E Lensch, Hendrik P. A. %E Rosenhahn, Bodo %E Seidel, Hans-Peter %E Slusallek, Philipp %E Weickert, Joachim %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T 12th International Fall Workshop on Vision, Modeling, and Visualization, VMV 2007 : %G eng %U http://hdl.handle.net/21.11116/0000-000F-4D4F-4 %@ 978-3-89838-085-0 %I Aka GmbH %D 2007 %B 12th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2007-11-07 - 2007-11-09 %D 2007 %C Saarbr&#252;cken, Germany
Lensch, H.P.A., Rosenhahn, B., Seidel, H.-P., Slusallek, P., and Weickert, J., eds. 2007b. Vision, Modeling, and Visualization 2007. Max-Planck-Institut für Informatik.
Export
BibTeX
@proceedings{LenschVMV2007,
  TITLE     = {Vision, Modeling, and Visualization 2007},
  EDITOR    = {Lensch, Hendrik P. A. and Rosenhahn, Bodo and Seidel, Hans-Peter and Slusallek, Philipp and Weickert, Joachim},
  LANGUAGE  = {eng},
  ISBN      = {978-3-940739-00-1; 978-3-89838-085-0},
  LOCALID   = {Local-ID: C12573CC004A8E26-6F608C7C1EC9297BC12573C400459111-LenschVMV2007},
  PUBLISHER = {Max-Planck-Institut f{\"u}r Informatik},
  YEAR      = {2007},
  DATE      = {2007},
  PAGES     = {261 p.},
  ADDRESS   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %E Lensch, Hendrik P. A. %E Rosenhahn, Bodo %E Seidel, Hans-Peter %E Slusallek, Philipp %E Weickert, Joachim %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Vision, Modeling, and Visualization 2007 : Proceedings ; November 7 - 9, 2007, Saarbr&#252;cken, Germany %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2132-5 %F EDOC: 356555 %@ 978-3-940739-00-1 %@ 978-3-89838-085-0 %F OTHER: Local-ID: C12573CC004A8E26-6F608C7C1EC9297BC12573C400459111-LenschVMV2007 %I Max-Planck-Institut f&#252;r Informatik %D 2007 %B Vision, Modeling, and Visualization 2007 %Z date of event: 2007-11-07 - 2007-11-09 %D 2007 %C Saarbr&#252;cken, Germany %P 261 p.
Langer, T., Belyaev, A., and Seidel, H.-P. 2007a. Mean Value Coordinates for Arbitrary Spherical Polygons and Polyhedra in IR3. Curve and Surface Design: Avignon 2006, Nashboro Press.
Abstract
Since their introduction, mean value coordinates have enjoyed ever increasing popularity in computer graphics and computational mathematics because they exhibit a variety of good properties. Most importantly, they are defined in the whole plane which allows interpolation and extrapolation without restrictions. Recently, mean value coordinates were generalized to spheres and to $\mathbb{R}^3$. We show that these spherical and 3D mean value coordinates are well defined on the whole sphere and the whole space $\mathbb{R}^3$, respectively.
Export
BibTeX
@inproceedings{LangerCS2007,
  TITLE     = {Mean Value Coordinates for Arbitrary Spherical Polygons and Polyhedra in {IR3}},
  AUTHOR    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-9728482-7-5},
  LOCALID   = {Local-ID: C12573CC004A8E26-371F4C8E05400F5FC125729100620364-LangerCS2007},
  PUBLISHER = {Nashboro Press},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Since their introduction, mean value coordinates have enjoyed ever increasing popularity in computer graphics and computational mathematics because they exhibit a variety of good properties. Most importantly, they are defined in the whole plane which allows interpolation and extrapolation without restrictions. Recently, mean value coordinates were generalized to spheres and to $\mathbb{R}^3$. We show that these spherical and 3D mean value coordinates are well defined on the whole sphere and the whole space $\mathbb{R}^3$, respectively.},
  BOOKTITLE = {Curve and Surface Design: Avignon 2006},
  EDITOR    = {Chenin, Patrick and Lyche, Tom and Schumaker, Larry L.},
  PAGES     = {193--202},
  SERIES    = {Modern Methods in Mathematics},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mean Value Coordinates for Arbitrary Spherical Polygons and Polyhedra in IR3 : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FD4-A %F EDOC: 356602 %F OTHER: Local-ID: C12573CC004A8E26-371F4C8E05400F5FC125729100620364-LangerCS2007 %I Nashboro Press %D 2007 %B Untitled Event %Z date of event: 2006-06-29 - 2006-07-05 %C Avignon, France %X Since their introduction, mean value coordinates have enjoyed ever increasing popularity in computer graphics and computational mathematics because they exhibit a variety of good properties. Most importantly, they are defined in the whole plane which allows interpolation and extrapolation without restrictions. Recently, mean value coordinates were generalized to spheres and to $\mathbb{R}^3$. We show that these spherical and 3D mean value coordinates are well defined on the whole sphere and the whole space $\mathbb{R}^3$, respectively. %B Curve and Surface Design: Avignon 2006 %E Chenin, Patrick; Lyche, Tom; Schumaker, Larry L. %P 193 - 202 %I Nashboro Press %@ 978-0-9728482-7-5 %B Modern Methods in Mathematics
Langer, T., Belyaev, A., and Seidel, H.-P. 2007b. Exact and Interpolatory Quadratures for Curvature Tensor Estimation. Computer Aided Geometric Design24, 8-9.
Abstract
The computation of the curvature of smooth surfaces has a long history in differential geometry and is essential for many geometric modeling applications such as feature detection. We present a novel approach to calculate the mean curvature from arbitrary normal curvatures. Then, we demonstrate how the same method can be used to obtain new formulae to compute the Gaussian curvature and the curvature tensor. The idea is to compute the curvature integrals by a weighted sum by making use of the periodic structure of the normal curvatures to make the quadratures exact. Finally, we derive an approximation formula for the curvature of discrete data like meshes and show its convergence if quadratically converging normals are available.
Export
BibTeX
@article{Langer-et-al_CAGD07,
  TITLE     = {Exact and Interpolatory Quadratures for Curvature Tensor Estimation},
  AUTHOR    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-8396},
  DOI       = {10.1016/j.cagd.2006.09.006},
  LOCALID   = {Local-ID: C12573CC004A8E26-DE732F6677ADEACAC125729100611483-LangerCAGD2007},
  PUBLISHER = {North-Holland},
  ADDRESS   = {Amsterdam},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {The computation of the curvature of smooth surfaces has a long history in differential geometry and is essential for many geometric modeling applications such as feature detection. We present a novel approach to calculate the mean curvature from arbitrary normal curvatures. Then, we demonstrate how the same method can be used to obtain new formulae to compute the Gaussian curvature and the curvature tensor. The idea is to compute the curvature integrals by a weighted sum by making use of the periodic structure of the normal curvatures to make the quadratures exact. Finally, we derive an approximation formula for the curvature of discrete data like meshes and show its convergence if quadratically converging normals are available.},
  JOURNAL   = {Computer Aided Geometric Design},
  VOLUME    = {24},
  NUMBER    = {8-9},
  PAGES     = {443--463},
}
Endnote
%0 Journal Article %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exact and Interpolatory Quadratures for Curvature Tensor Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F1E-6 %F EDOC: 356607 %R 10.1016/j.cagd.2006.09.006 %F OTHER: Local-ID: C12573CC004A8E26-DE732F6677ADEACAC125729100611483-LangerCAGD2007 %D 2007 %* Review method: peer-reviewed %X The computation of the curvature of smooth surfaces has a long history <br> in differential geometry and is essential for many<br> geometric modeling applications such as feature detection. We present a novel <br>approach to calculate the<br> mean curvature from arbitrary normal curvatures. Then, we demonstrate how the <br>same method can be used to <br> obtain new formulae to compute the Gaussian curvature and the curvature <br>tensor. <br> The idea is to compute the curvature integrals by a weighted sum by making <br>use of the periodic<br> structure of the normal curvatures to make the quadratures exact.<br> Finally, we derive an approximation formula for<br> the curvature of discrete data like meshes and <br> show its convergence if quadratically converging normals are available. %J Computer Aided Geometric Design %V 24 %N 8-9 %& 443 %P 443 - 463 %I North-Holland %C Amsterdam %@ false
Langer, T. and Seidel, H.-P. 2007a. Construction of smooth maps with mean value coordinates. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Bernstein polynomials are a classical tool in Computer Aided Design to create smooth maps with a high degree of local control. They are used for the construction of Bézier surfaces, free-form deformations, and many other applications. However, classical Bernstein polynomials are only defined for simplices and parallelepipeds. These can in general not directly capture the shape of arbitrary objects. Instead, a tessellation of the desired domain has to be done first. We construct smooth maps on arbitrary sets of polytopes such that the restriction to each of the polytopes is a Bernstein polynomial in mean value coordinates (or any other generalized barycentric coordinates). In particular, we show how smooth transitions between different domain polytopes can be ensured.
Export
BibTeX
@techreport{LangerSeidel2007,
  TITLE       = {Construction of smooth maps with mean value coordinates},
  AUTHOR      = {Langer, Torsten and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-002},
  NUMBER      = {MPI-I-2007-4-002},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {Bernstein polynomials are a classical tool in Computer Aided Design to create smooth maps with a high degree of local control. They are used for the construction of B{\'e}zier surfaces, free-form deformations, and many other applications. However, classical Bernstein polynomials are only defined for simplices and parallelepipeds. These can in general not directly capture the shape of arbitrary objects. Instead, a tessellation of the desired domain has to be done first. We construct smooth maps on arbitrary sets of polytopes such that the restriction to each of the polytopes is a Bernstein polynomial in mean value coordinates (or any other generalized barycentric coordinates). In particular, we show how smooth transitions between different domain polytopes can be ensured.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Langer, Torsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Construction of smooth maps with mean value coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66DF-1 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-002 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %P 22 p. %X Bernstein polynomials are a classical tool in Computer Aided Design to create smooth maps with a high degree of local control. They are used for the construction of B\'ezier surfaces, free-form deformations, and many other applications. However, classical Bernstein polynomials are only defined for simplices and parallelepipeds. These can in general not directly capture the shape of arbitrary objects. Instead, a tessellation of the desired domain has to be done first. We construct smooth maps on arbitrary sets of polytopes such that the restriction to each of the polytopes is a Bernstein polynomial in mean value coordinates (or any other generalized barycentric coordinates). In particular, we show how smooth transitions between different domain polytopes can be ensured. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Langer, T. and Seidel, H.-P. 2007b. Mean Value Bézier Surfaces. Mathematics of Surfaces XII, Springer.
Abstract
Bézier surfaces are an important design tool in Computer Aided Design. They are parameterized surfaces where the parameterization can be represented as a homogeneous polynomial in barycentric coordinates. Usually, Wachspress coordinates are used to obtain tensor product Bézier surfaces over rectangular domains. Recently, Floater introduced mean value coordinates as an alternative to Wachspress coordinates. When used to construct Bézier patches, they offer additional control points without raising the polynomial degree. We investigate the potential of mean value coordinates to design mean value Bézier surfaces.
Export
BibTeX
@inproceedings{Langer-Seidel_IMA07,
  TITLE     = {Mean Value B{\'e}zier Surfaces},
  AUTHOR    = {Langer, Torsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-540-73842-8},
  DOI       = {10.1007/978-3-540-73843-5_16},
  LOCALID   = {Local-ID: C12573CC004A8E26-1C7F4987849BCFFCC12572C9004E3DFE-LangerMoS07},
  PUBLISHER = {Springer},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {B{\'e}zier surfaces are an important design tool in Computer Aided Design. They are parameterized surfaces where the parameterization can be represented as a homogeneous polynomial in barycentric coordinates. Usually, Wachspress coordinates are used to obtain tensor product B{\'e}zier surfaces over rectangular domains. Recently, Floater introduced mean value coordinates as an alternative to Wachspress coordinates. When used to construct B{\'e}zier patches, they offer additional control points without raising the polynomial degree. We investigate the potential of mean value coordinates to design mean value B{\'e}zier surfaces.},
  BOOKTITLE = {Mathematics of Surfaces XII},
  EDITOR    = {Martin, Ralph and Sabin, Malcolm and Winkler, Joab},
  PAGES     = {263--274},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4647},
  ADDRESS   = {Sheffield, UK},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mean Value B&#233;zier Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FD0-1 %F EDOC: 356547 %R 10.1007/978-3-540-73843-5_16 %F OTHER: Local-ID: C12573CC004A8E26-1C7F4987849BCFFCC12572C9004E3DFE-LangerMoS07 %D 2007 %B 12th IMA International Conference %Z date of event: 2007-09-04 - 2007-09-06 %C Sheffield, UK %X B\'ezier surfaces are an important design tool in Computer Aided Design. <br>They are parameterized surfaces where the parameterization can be represented <br>as a homogeneous polynomial in barycentric coordinates. <br>Usually, Wachspress coordinates are used to obtain tensor product B\'ezier <br>surfaces over rectangular domains. <br>Recently, Floater introduced mean value coordinates as an alternative to <br>Wachspress coordinates. <br>When used to construct B\'ezier patches, they offer additional control points <br>without <br>raising the polynomial degree. We investigate the potential of mean value <br>coordinates <br>to design mean value B\'ezier surfaces. %B Mathematics of Surfaces XII %E Martin, Ralph; Sabin, Malcolm; Winkler, Joab %P 263 - 274 %I Springer %@ 3-540-73842-8 %B Lecture Notes in Computer Science %N 4647 %U https://rdcu.be/dIMjy
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2007a. Contrast Restoration by Adaptive Countershading. Computer Graphics Forum, Blackwell.
Abstract
We address the problem of communicating contrasts in images degraded with respect to their original due to processing with computer graphics algorithms. Such degradation can happen during the tone mapping of high dynamic range images, or while rendering scenes with low contrast shaders or with poor lighting. Inspired by a family of known perceptual illusions: Craik-O'Brien-Cornsweet, we enhance contrasts by modulating brightness at the edges to create countershading profiles. We generalize unsharp masking by coupling it with a multi-resolution local contrast metric to automatically create the countershading profiles from the sub-band components which are individually adjusted to each corrected feature to best enhance contrast with respect to the reference. Additionally, we employ a visual detection model to assure that our enhancements are not perceived as objectionable halo artifacts. The overall appearance of images remains mostly unchanged and the enhancement is achieved within the available dynamic range. We use our method to post-correct tone mapped images and improve images using their depth information.
Export
BibTeX
@inproceedings{Krawczyk-et-al_Eurographics07,
  TITLE     = {Contrast Restoration by Adaptive Countershading},
  AUTHOR    = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2007.01081.x},
  LOCALID   = {Local-ID: C12573CC004A8E26-3AAFD82CDB4A81EBC12573C4005DCCE4-KrawczykEG2007},
  PUBLISHER = {Blackwell},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We address the problem of communicating contrasts in images degraded with respect to their original due to processing with computer graphics algorithms. Such degradation can happen during the tone mapping of high dynamic range images, or while rendering scenes with low contrast shaders or with poor lighting. Inspired by a family of known perceptual illusions: Craik-O'Brien-Cornsweet, we enhance contrasts by modulating brightness at the edges to create countershading profiles. We generalize unsharp masking by coupling it with a multi-resolution local contrast metric to automatically create the countershading profiles from the sub-band components which are individually adjusted to each corrected feature to best enhance contrast with respect to the reference. Additionally, we employ a visual detection model to assure that our enhancements are not perceived as objectionable halo artifacts. The overall appearance of images remains mostly unchanged and the enhancement is achieved within the available dynamic range. We use our method to post-correct tone mapped images and improve images using their depth information.},
  BOOKTITLE = {Eurographics 2007},
  EDITOR    = {Cohen-Or, Daniel and Slavik, Pavel},
  PAGES     = {581--590},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {26},
  ISSUE     = {3},
  ADDRESS   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Contrast Restoration by Adaptive Countershading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1EBC-C %F EDOC: 356532 %F OTHER: Local-ID: C12573CC004A8E26-3AAFD82CDB4A81EBC12573C4005DCCE4-KrawczykEG2007 %R 10.1111/j.1467-8659.2007.01081.x %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X We address the problem of communicating contrasts in images degraded with <br>respect to their original due to<br>processing with computer graphics algorithms. Such degradation can happen <br>during the tone mapping of high dynamic<br>range images, or while rendering scenes with low contrast shaders or with poor <br>lighting. Inspired by a family<br>of known perceptual illusions: Craik-O'Brien-Cornsweet, we enhance contrasts by <br>modulating brightness at<br>the edges to create countershading profiles. We generalize unsharp masking by <br>coupling it with a multi-resolution<br>local contrast metric to automatically create the countershading profiles from <br>the sub-band components which are<br>individually adjusted to each corrected feature to best enhance contrast with <br>respect to the reference. Additionally,<br>we employ a visual detection model to assure that our enhancements are not <br>perceived as objectionable halo<br>artifacts. The overall appearance of images remains mostly unchanged and the <br>enhancement is achieved within<br>the available dynamic range. We use our method to post-correct tone mapped <br>images and improve images using<br>their depth information. %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 581 - 590 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Krawczyk, G., Mantiuk, R., Zdrojewska, D., and Seidel, H.-P. 2007b. Brightness Adjustment for HDR and Tone Mapped Images. Pacific Graphics 2007 (PG 2007), IEEE Computer Society.
Export
BibTeX
@inproceedings{Krawczyk-et-al_PG07,
  TITLE     = {Brightness Adjustment for {HDR} and Tone Mapped Images},
  AUTHOR    = {Krawczyk, Grzegorz and Mantiuk, Rafa{\l} and Zdrojewska, Dorota and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-3009-5},
  DOI       = {10.1109/PG.2007.38},
  LOCALID   = {Local-ID: C12573CC004A8E26-7616B324D4FB942BC12573AE00532E79-krawczyk2006ba},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Pacific Graphics 2007 (PG 2007)},
  EDITOR    = {Alexa, Marc and Gortler, Steven and Ju, Tao},
  PAGES     = {373--381},
  ADDRESS   = {Maui, HI, USA},
}
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Mantiuk, Rafa&#322; %A Zdrojewska, Dorota %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Brightness Adjustment for HDR and Tone Mapped Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E7C-B %F EDOC: 356530 %F OTHER: Local-ID: C12573CC004A8E26-7616B324D4FB942BC12573AE00532E79-krawczyk2006ba %R 10.1109/PG.2007.38 %D 2007 %B 15th Pacific Conference on Computer Graphics and Applications %Z date of event: 2007-10-29 - 2007-11-02 %C Maui, HI, USA %B Pacific Graphics 2007 %E Alexa, Marc; Gortler, Steven; Ju, Tao %P 373 - 381 %I IEEE Computer Society %@ 0-7695-3009-5
Kerber, J., Belyaev, A., and Seidel, H.-P. 2007. Feature Preserving Depth Compression of Range Images. SCCG ’07: Proceedings of the 23rd Spring Conference on Computer Graphics, ACM.
Abstract
In this paper we present a new and efficient method for the depth-compression of range images in a feature preserving way. Given a range image (a depth field), the problem studied in the paper consists of achieving a high compression of the depth data while preserving (or even enhancing) perceptually important features of the image. Our approach works in the gradient domain. It combines a linear rescaling scheme with a simple enhancing technique applied to the gradient of the image. The new depth field is obtained from the enhanced and rescaled derivatives of initial range image. By four parameters a user can steer the compression ratio and the amount of details to be perceivable in the outcome. Experiments have shown that our method works very well even for high compression ratios. Applications can be of artistic nature e.g. embossment, engraving or carving.
Export
BibTeX
@inproceedings{Kerber-et-al_SCCG07,
  TITLE     = {Feature Preserving Depth Compression of Range Images},
  AUTHOR    = {Kerber, Jens and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-80-223-2292-8},
  DOI       = {10.1145/2614348.2614363},
  LOCALID   = {Local-ID: C12573CC004A8E26-9117101D453F9ED4C12573C500540244-Kerber2007_1},
  PUBLISHER = {ACM},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {In this paper we present a new and efficient method for the depth-compression of range images in a feature preserving way. Given a range image (a depth field), the problem studied in the paper consists of achieving a high compression of the depth data while preserving (or even enhancing) perceptually important features of the image. Our approach works in the gradient domain. It combines a linear rescaling scheme with a simple enhancing technique applied to the gradient of the image. The new depth field is obtained from the enhanced and rescaled derivatives of initial range image. By four parameters a user can steer the compression ratio and the amount of details to be perceivable in the outcome. Experiments have shown that our method works very well even for high compression ratios. Applications can be of artistic nature e.g. embossment, engraving or carving.},
  BOOKTITLE = {SCCG '07: Proceedings of the 23rd Spring Conference on Computer Graphics},
  EDITOR    = {Sbert, Mateu},
  PAGES     = {101--105},
  ADDRESS   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Kerber, Jens %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Preserving Depth Compression of Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F3D-2 %F EDOC: 356511 %F OTHER: Local-ID: C12573CC004A8E26-9117101D453F9ED4C12573C500540244-Kerber2007_1 %R 10.1145/2614348.2614363 %D 2007 %B 23rd Spring Conference on Computer Graphics %Z date of event: 2007-04-26 - 2007-04-28 %C Budmerice, Slovakia %X In this paper we present a new and efficient method for the<br>depth-compression of range images in a feature preserving way.<br>Given a range image (a depth field), the problem studied in the paper<br>consists of achieving a high compression of the depth data while<br>preserving (or even enhancing) perceptually important features<br>of the image. Our approach works in the gradient domain.<br>It combines a linear rescaling scheme with a simple enhancing<br>technique applied to the gradient of the image. The new depth field<br>is obtained from the enhanced and rescaled derivatives of initial range<br>image. By four parameters a user can steer the compression ratio and<br>the amount of details to be perceivable in the outcome.<br>Experiments have shown that our method works very well even<br>for high compression ratios. Applications can be of artistic<br>nature e.g. embossment, engraving or carving. %B SCCG '07: Proceedings of the 23rd Spring Conference on Computer Graphics %E Sbert, Mateu %P 101 - 105 %I ACM %@ 978-80-223-2292-8
Ihrke, I., Ziegler, G., Tevs, A., Theobalt, C., Magnor, M., and Seidel, H.-P. 2007. Eikonal Rendering: Efficient Light Transport in Refractive Objects. ACM Transactions on Graphics, ACM.
Abstract
We present a new method for real-time rendering of sophisticated lighting effects in and around refractive objects. It enables us to realistically display refractive objects with complex material properties, such as arbitrarily varying refraction index, inhomogeneous attenuation, as well as spatially-varying anisotropic scattering and reflectance properties. User-controlled changes of lighting positions only require a few seconds of update time. Our method is based on a set of ordinary differential equations derived from the eikonal equation, the main postulate of geometric optics. This set of equations allows for fast casting of bent light rays with the complexity of a particle tracer. Based on this concept, we also propose an efficient light propagation technique using adaptive wavefront tracing. Efficient GPU implementations for our algorithmic concepts enable us to render visual effects that were previously not reproducible in this combination in real-time.
Export
BibTeX
@inproceedings{Ihrke-et-al_SIGGRAPH07,
  TITLE     = {Eikonal Rendering: Efficient Light Transport in Refractive Objects},
  AUTHOR    = {Ihrke, Ivo and Ziegler, Gernot and Tevs, Art and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  ISBN      = {978-1-4503-7836-9},
  DOI       = {10.1145/1275808.1276451},
  LOCALID   = {Local-ID: C12573CC004A8E26-DEAE1CC8B13FE3B9C12573B20007958E-Eiko2007},
  PUBLISHER = {ACM},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a new method for real-time rendering of sophisticated lighting effects in and around refractive objects. It enables us to realistically display refractive objects with complex material properties, such as arbitrarily varying refraction index, inhomogeneous attenuation, as well as spatially-varying anisotropic scattering and reflectance properties. User-controlled changes of lighting positions only require a few seconds of update time. Our method is based on a set of ordinary differential equations derived from the eikonal equation, the main postulate of geometric optics. This set of equations allows for fast casting of bent light rays with the complexity of a particle tracer. Based on this concept, we also propose an efficient light propagation technique using adaptive wavefront tracing. Efficient GPU implementations for our algorithmic concepts enable us to render visual effects that were previously not reproducible in this combination in real-time.},
  BOOKTITLE = {SIGGRAPH '07: ACM SIGGRAPH 2007 papers},
  PAGES     = {59.1--59.9},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {26},
  ISSUE     = {3},
  ADDRESS   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Ihrke, Ivo %A Ziegler, Gernot %A Tevs, Art %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Eikonal Rendering: Efficient Light Transport in Refractive Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F0C-E %F EDOC: 356523 %R 10.1145/1275808.1276451 %F OTHER: Local-ID: C12573CC004A8E26-DEAE1CC8B13FE3B9C12573B20007958E-Eiko2007 %D 2007 %B ACM SIGGRAPH 2007 %Z date of event: 2007-08-05 - 2007-08-09 %C San Diego, CA, USA %X We present a new method for real-time rendering of sophisticated lighting <br>effects in and around refractive objects. It enables us to realistically <br>display refractive objects with complex material properties, such as <br>arbitrarily varying refraction index, inhomogeneous attenuation, as well as <br>spatially-varying anisotropic scattering and reflectance properties. <br>User-controlled changes of lighting positions only require a few seconds of <br>update time. Our method is based on a set of ordinary differential equations <br>derived from the eikonal equation, the main postulate of geometric optics. This <br>set of equations allows for fast casting of bent light rays with the complexity <br>of a particle tracer. Based on this concept, we also propose an efficient light <br>propagation technique using adaptive wavefront tracing. 
Efficient GPU <br>implementations for our algorithmic concepts enable us to render visual effects <br>that were previously not reproducible in this combination in real-time. %B SIGGRAPH '07: ACM SIGGRAPH 2007 papers %P 59.1 - 59.9 %I ACM %@ 978-1-4503-7836-9 %J ACM Transactions on Graphics %V 26 %N 3 %I Association for Computing Machinery %@ false
Herzog, R. and Seidel, H.-P. 2007. Lighting Details Preserving Photon Density Estimation. Pacific Graphics 2007 (PG 2007), IEEE Computer Society.
Abstract
Standard density estimation approaches suffer from visible bias due to low-pass filtering of the lighting function. Therefore, most photon density estimation methods have been used primarily with inefficient Monte Carlo final gathering to achieve high-quality results for the indirect illumination. We present a density estimation technique for efficiently computing all-frequency global illumination in diffuse and moderately glossy scenes. In particular, we compute the direct, indirect, and caustics illumination during photon tracing from the light sources. Since the high frequencies in the illumination often arise from visibility changes and surface normal variations, we consider a kernel that takes these factors into account. To efficiently detect visibility changes, we introduce a hierarchical voxel data structure of the scene geometry, which is generated on GPU. Further, we preserve the surface orientation by computing the density estimation in ray space.
Export
BibTeX
@inproceedings{Herzog-Seidel_PG07,
  TITLE     = {Lighting Details Preserving Photon Density Estimation},
  AUTHOR    = {Herzog, Robert and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-3009-5},
  DOI       = {10.1109/PG.2007.57},
  LOCALID   = {Local-ID: C12573CC004A8E26-85D4279BB37D5A93C12573C4005A1726-HerzogPG2007},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Standard density estimation approaches suffer from visible bias due to low-pass filtering of the lighting function. Therefore, most photon density estimation methods have been used primarily with inefficient Monte Carlo final gathering to achieve high-quality results for the indirect illumination. We present a density estimation technique for efficiently computing all-frequency global illumination in diffuse and moderately glossy scenes. In particular, we compute the direct, indirect, and caustics illumination during photon tracing from the light sources. Since the high frequencies in the illumination often arise from visibility changes and surface normal variations, we consider a kernel that takes these factors into account. To efficiently detect visibility changes, we introduce a hierarchical voxel data structure of the scene geometry, which is generated on GPU. Further, we preserve the surface orientation by computing the density estimation in ray space.},
  BOOKTITLE = {Pacific Graphics 2007 (PG 2007)},
  EDITOR    = {Alexa, Marc and Gortler, Steven and Ju, Tao},
  PAGES     = {407--410},
  ADDRESS   = {Maui, Hawaii, USA},
}
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lighting Details Preserving Photon Density Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FB3-3 %F EDOC: 356514 %F OTHER: Local-ID: C12573CC004A8E26-85D4279BB37D5A93C12573C4005A1726-HerzogPG2007 %R 10.1109/PG.2007.57 %D 2007 %B 15th Pacific Conference on Computer Graphics and Applications %Z date of event: 2007-10-29 - 2007-11-02 %C Maui, Hawaii, USA %X Standard density estimation approaches suffer from visible bias due to low-pass <br>filtering of the lighting function. Therefore, most photon density estimation <br>methods have been used primarily with inefficient Monte Carlo final gathering <br>to achieve high-quality results for the indirect illumination. We present a <br>density estimation technique for efficiently computing all-frequency global <br>illumination in diffuse and moderately glossy scenes. In particular, we compute <br>the direct, indirect, and caustics illumination during photon tracing from the <br>light sources. Since the high frequencies in the illumination often arise from <br>visibility changes and surface normal variations, we consider a kernel that <br>takes these factors into account. To efficiently detect visibility changes, we <br>introduce a hierarchical voxel data structure of the scene geometry, which is <br>generated on GPU. Further, we preserve the surface orientation by computing the <br>density estimation in ray space. %B Pacific Graphics 2007 %E Alexa, Marc; Gortler, Steven; Ju, Tao %P 407 - 410 %I IEEE Computer Society %@ 0-7695-3009-5
Herzog, R., Havran, V., Kinuwaki, S., Myszkowski, K., and Seidel, H.-P. 2007a. Global Illumination using Photon Ray Splatting. Computer Graphics Forum, Blackwell.
Abstract
We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. Many existing global illumination approaches either quickly compute an overly approximate solution or perform an orders of magnitude slower computation to obtain high-quality results for the indirect illumination. The proposed method improves photon density estimation and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our photon splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.
Export
BibTeX
@inproceedings{Herzog-et-al_Eurographics07,
  TITLE     = {Global Illumination using Photon Ray Splatting},
  AUTHOR    = {Herzog, Robert and Havran, Vlastimil and Kinuwaki, Shinichi and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2007.01073.x},
  LOCALID   = {Local-ID: C12573CC004A8E26-922F7B2EB5B8D78CC12573C4004C5B93-HerzogEG2007},
  PUBLISHER = {Blackwell},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. Many existing global illumination approaches either quickly compute an overly approximate solution or perform an orders of magnitude slower computation to obtain high-quality results for the indirect illumination. The proposed method improves photon density estimation and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our photon splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.},
  BOOKTITLE = {Eurographics 2007},
  EDITOR    = {Cohen-Or, Daniel and Slavik, Pavel},
  PAGES     = {503--513},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {26},
  ISSUE     = {3},
  ADDRESS   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Havran, Vlastimil %A Kinuwaki, Shinichi %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Global Illumination using Photon Ray Splatting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F5A-F %F EDOC: 356513 %F OTHER: Local-ID: C12573CC004A8E26-922F7B2EB5B8D78CC12573C4004C5B93-HerzogEG2007 %R 10.1111/j.1467-8659.2007.01073.x %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X We present a novel framework for efficiently computing the indirect <br>illumination in diffuse and moderately glossy<br>scenes using density estimation techniques. Many existing global illumination <br>approaches either quickly compute<br>an overly approximate solution or perform an orders of magnitude slower <br>computation to obtain high-quality<br>results for the indirect illumination. The proposed method improves photon <br>density estimation and leads to significantly<br>better visual quality in particular for complex geometry, while only slightly <br>increasing the computation<br>time. We perform direct splatting of photon rays, which allows us to use <br>simpler search data structures. Since our<br>density estimation is carried out in ray space rather than on surfaces, as in <br>the commonly used photon mapping algorithm,<br>the results are more robust against geometrically incurred sources of bias. 
<br>This holds also in combination<br>with final gathering where photon mapping often overestimates the illumination <br>near concave geometric features.<br>In addition, we show that our photon splatting technique can be extended to <br>handle moderately glossy surfaces<br>and can be combined with traditional irradiance caching for sparse sampling and <br>filtering in image space. %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 503 - 513 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Herzog, R., Havran, V., Kinuwaki, S., Myszkowski, K., and Seidel, H.-P. 2007b. Global Illumination using Photon Ray Splatting. Max-Planck-Institut für Informatik, Saarbrücken, Germany.
Abstract
We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. A vast majority of existing global illumination approaches either quickly computes an approximate solution, which may not be adequate for previews, or performs a much more time-consuming computation to obtain high-quality results for the indirect illumination. Our method improves photon density estimation, which is an approximate solution, and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Our novel lighting computation is derived from basic radiometric theory and requires only small changes to existing photon splatting approaches. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.
Export
BibTeX
@techreport{HerzogReport2007,
  TITLE       = {Global Illumination using Photon Ray Splatting},
  AUTHOR      = {Herzog, Robert and Havran, Vlastimil and Kinuwaki, Shinichi and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2007-4-007},
  LOCALID     = {Local-ID: C12573CC004A8E26-88919E23BF524D6AC12573C4005B8D41-HerzogReport2007},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken, Germany},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. A vast majority of existing global illumination approaches either quickly computes an approximate solution, which may not be adequate for previews, or performs a much more time-consuming computation to obtain high-quality results for the indirect illumination. Our method improves photon density estimation, which is an approximate solution, and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Our novel lighting computation is derived from basic radiometric theory and requires only small changes to existing photon splatting approaches. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.},
  TYPE        = {Research Report},
}
Endnote
%0 Report %A Herzog, Robert %A Havran, Vlastimil %A Kinuwaki, Shinichi %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Global Illumination using Photon Ray Splatting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F57-6 %F EDOC: 356502 %F OTHER: Local-ID: C12573CC004A8E26-88919E23BF524D6AC12573C4005B8D41-HerzogReport2007 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken, Germany %D 2007 %P 66 p. %X We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. A vast majority of existing global illumination approaches either quickly computes an approximate solution, which may not be adequate for previews, or performs a much more time-consuming computation to obtain high-quality results for the indirect illumination. Our method improves photon density estimation, which is an approximate solution, and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Our novel lighting computation is derived from basic radiometric theory and requires only small changes to existing photon splatting approaches. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. 
This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space. %B Research Report
Hasler, N., Rosenhahn, B., and Seidel, H.-P. 2007a. Reverse Engineering Garments. Computer Vision/Computer Graphics Collaboration Techniques (MIRAGE 2007), Springer.
Abstract
Segmenting garments from humanoid meshes or point clouds is a challenging problem with applications in the textile industry and in model based motion capturing. In this work we present a physically based template-matching technique for the automatic extraction of garment dimensions from 3D meshes or point clouds of dressed humans. The successful identification of garment dimensions also allows the semantic segmentation of the mesh into naked and dressed parts.
Export
BibTeX
@inproceedings{Hasler-et-al_MIRAGE07, TITLE = {Reverse Engineering Garments}, AUTHOR = {Hasler, Nils and Rosenhahn, Bodo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-540-71456-1}, DOI = {10.1007/978-3-540-71457-6_19}, LOCALID = {Local-ID: C12573CC004A8E26-3133150207FC01F5C1257295003A8DC9-HasRosSei07}, PUBLISHER = {Springer}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Segmenting garments from humanoid meshes or point clouds is a challenging <br>problem with applications in the textile industry and in model based motion <br>capturing. In this work we present a physically based template-matching <br>technique for the automatic extraction of garment dimensions from 3D meshes or <br>point clouds of dressed humans. The successfull identification of garment <br>dimensions also allows the semantic segmentation of the mesh into naked and <br>dressed parts.}, BOOKTITLE = {Computer Vision/Computer Graphics Collaboration Techniques (MIRAGE 2007)}, EDITOR = {Gagalowicz, Andr{\'e} and Philips, Wilfried}, PAGES = {200--211}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {4418}, ADDRESS = {Rocquencourt, France}, }
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Reverse Engineering Garments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-208C-5 %F EDOC: 356633 %R 10.1007/978-3-540-71457-6_19 %F OTHER: Local-ID: C12573CC004A8E26-3133150207FC01F5C1257295003A8DC9-HasRosSei07 %D 2007 %B Third International Conference on Computer Vision/Computer Graphics %Z date of event: 2007-03-28 - 2007-03-30 %C Rocquencourt, France %X Segmenting garments from humanoid meshes or point clouds is a challenging <br>problem with applications in the textile industry and in model based motion <br>capturing. In this work we present a physically based template-matching <br>technique for the automatic extraction of garment dimensions from 3D meshes or <br>point clouds of dressed humans. The successfull identification of garment <br>dimensions also allows the semantic segmentation of the mesh into naked and <br>dressed parts. %B Computer Vision/Computer Graphics Collaboration Techniques %E Gagalowicz, Andr&#233;; Philips, Wilfried %P 200 - 211 %I Springer %@ 3-540-71456-1 %B Lecture Notes in Computer Science %N 4418 %U https://rdcu.be/dIMf2
Hasler, N., Rosenhahn, B., Asbach, M., Ohm, J.-R., and Seidel, H.-P. 2007b. An Analysis-by-Synthesis Approach to Tracking of Textiles. IEEE Workshop on Motion and Video Computing (WMVC’07), IEEE Computer Society.
Abstract
Despite strong interest in cloth simulation on the one hand and tracking of deformable objects on the other, little effort has been put into tracking cloth motion by modelling the fabric. Here, an analysis-by-synthesis approach to tracking textiles is proposed which, by fitting a simulated textile to a set of contours, is able to reconstruct the 3D-cloth configuration. Fitting is accomplished by optimising the parameters of the mass-spring model that is used to simulate the textile as well as the positions of a limited number of constrained points. To improve tracking accuracy and to overcome the inherently chaotic behaviour of the real fabric several techniques for tracking features on the cloth's surface and the best way for them to influence the simulation are evaluated.
Export
BibTeX
@inproceedings{Hasler2007, TITLE = {An Analysis-by-Synthesis Approach to Tracking of Textiles}, AUTHOR = {Hasler, Nils and Rosenhahn, Bodo and Asbach, Mark and Ohm, Jens-Rainer and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1109/WMVC.2007.7}, LOCALID = {Local-ID: C12573CC004A8E26-52FA0507A1991684C125729500397468-Hasler2007}, PUBLISHER = {IEEE Computer Society}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Despite strong interest in cloth simulation on the one hand and tracking of deformable objects on the other, little effort has been put into tracking cloth motion by modelling the fabric. Here, an analysis-by-synthesis approach to tracking textiles is proposed which, by fitting a simulated textile to a set of contours, is able to reconstruct the 3D-cloth configuration. Fitting is accomplished by optimising the parameters of the mass-spring model that is used to simulate the textile as well as the positions of a limited number of constrained points. To improve tracking accuracy and to overcome the inherently chaotic behaviour of the real fabric several techniques for tracking features on the cloth's surface and the best way for them to influence the simulation are evaluated.}, BOOKTITLE = {IEEE Workshop on Motion and Video Computing (WMVC'07)}, EDITOR = {da Vitoria Lobo, Niels}, PAGES = {25.1--8}, }
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Rosenhahn, Bodo %A Asbach, Mark %A Ohm, Jens-Rainer %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Analysis-by-Synthesis Approach to Tracking of Textiles : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E0E-1 %F EDOC: 356601 %R 10.1109/WMVC.2007.7 %F OTHER: Local-ID: C12573CC004A8E26-52FA0507A1991684C125729500397468-Hasler2007 %I IEEE Computer Society %D 2007 %B Untitled Event %Z date of event: 2007-02-23 - 2007-02-24 %C Austin, USA %X Despite strong interest in cloth simulation on the one hand and tracking of deformable objects on the other, little effort has been put into tracking cloth motion by modelling the fabric. Here, an analysis-by-synthesis approach to tracking textiles is proposed which, by fitting a simulated textile to a set of contours, is able to reconstruct the 3D-cloth configuration. Fitting is accomplished by optimising the parameters of the mass-spring model that is used to simulate the textile as well as the positions of a limited number of constrained points. To improve tracking accuracy and to overcome the inherently chaotic behaviour of the real fabric several techniques for tracking features on the cloth's surface and the best way for them to influence the simulation are evaluated. %B IEEE Workshop on Motion and Video Computing (WMVC'07) %E da Vitoria Lobo, Niels %P 25.1 - 8 %I IEEE Computer Society
Günther, J., Popov, S., Seidel, H.-P., and Slusallek, P. 2007. Realtime Ray Tracing on GPU with BVH-based Packet Traversal. IEEE Symposium on Interactive Ray Tracing 2007, RT’07, IEEE.
Abstract
Recent GPU ray tracers can already achieve performance competitive to that of their CPU counterparts. Nevertheless, these systems can not yet fully exploit the capabilities of modern GPUs and can only handle medium-sized, static scenes. In this paper we present a BVH-based GPU ray tracer with a parallel packet traversal algorithm using a shared stack. We also present a fast, CPU-based BVH construction algorithm which very accurately approximates the surface area heuristic using streamed binning while still being one order of magnitude faster than previously published results. Furthermore, using a BVH allows us to push the size limit of supported scenes on the GPU: We can now ray trace the 12.7 million triangle Power Plant at 1024×1024 image resolution with 3 fps, including shading and shadows.
Export
BibTeX
@inproceedings{guenther:07:BVHonGPU, TITLE = {Realtime Ray Tracing on {GPU} with {BVH}-based Packet Traversal}, AUTHOR = {G{\"u}nther, Johannes and Popov, Stefan and Seidel, Hans-Peter and Slusallek, Philipp}, LANGUAGE = {eng}, ISBN = {978-1-4244-1629-5}, DOI = {10.1109/RT.2007.4342598}, LOCALID = {Local-ID: C12573CC004A8E26-B203DDAC14A06FB1C125732F0034229F-guenther:07:BVHonGPU}, PUBLISHER = {IEEE}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Recent GPU ray tracers can already achieve performance competitive to that of their CPU counterparts. Nevertheless, these systems can not yet fully exploit the capabilities of modern GPUs and can only handle medium-sized, static scenes. In this paper we present a BVH-based GPU ray tracer with a parallel packet traversal algorithm using a shared stack. We also present a fast, CPU-based BVH construction algorithm which very accurately approximates the surface area heuristic using streamed binning while still being one order of magnitude faster than previously published results. Furthermore, using a BVH allows us to push the size limit of supported scenes on the GPU: We can now ray trace the 12.7~million triangle \textsc{Power Plant} at 1024$\times$1024 image resolution with 3~fps, including shading and shadows.}, BOOKTITLE = {IEEE Symposium on Interactive Ray Tracing 2007, RT'07}, EDITOR = {Keller, Alexander and Christensen, Per}, PAGES = {113--118}, }
Endnote
%0 Conference Proceedings %A G&#252;nther, Johannes %A Popov, Stefan %A Seidel, Hans-Peter %A Slusallek, Philipp %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Realtime Ray Tracing on GPU with BVH-based Packet Traversal : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2078-2 %F EDOC: 356539 %R 10.1109/RT.2007.4342598 %F OTHER: Local-ID: C12573CC004A8E26-B203DDAC14A06FB1C125732F0034229F-guenther:07:BVHonGPU %I IEEE %D 2007 %B Untitled Event %Z date of event: 2007-09-10 - 2007-09-12 %C Ulm, Germany %X Recent GPU ray tracers can already achieve performance competitive to that of their CPU counterparts. Nevertheless, these systems can not yet fully exploit the capabilities of modern GPUs and can only handle medium-sized, static scenes. In this paper we present a BVH-based GPU ray tracer with a parallel packet traversal algorithm using a shared stack. We also present a fast, CPU-based BVH construction algorithm which very accurately approximates the surface area heuristic using streamed binning while still being one order of magnitude faster than previously published results. Furthermore, using a BVH allows us to push the size limit of supported scenes on the GPU: We can now ray trace the 12.7~million triangle \textsc{Power Plant} at 1024$\times$1024 image resolution with 3~fps, including shading and shadows. %B IEEE Symposium on Interactive Ray Tracing 2007, RT'07 %E Keller, Alexander; Christensen, Per %P 113 - 118 %I IEEE %@ 978-1-4244-1629-5
Gross, M., Müller, H., Seidel, H.-P., and Shum, H. 2007a. 07171 Abstracts Collection – Visual Computing – Convergence of Computer Graphics and Computer Vision. Visual Computing - Convergence of Computer Graphics and Computer Vision, Dagstuhl Seminar Proceedings, Volume 7171, Internationales Begegnungs- und Forschungszentrum für Informatik (IBFI).
Export
BibTeX
@inproceedings{DBLP:conf/dagstuhl/GrossMSS07a, TITLE = {07171 Abstracts Collection -- Visual Computing -- Convergence of Computer Graphics and Computer Vision}, AUTHOR = {Gross, Markus and M{\"u}ller, Heinrich and Seidel, Hans-Peter and Shum, Harry}, LANGUAGE = {eng}, ISSN = {1862-4405}, DOI = {10.4230/DagSemProc.07171.1}, PUBLISHER = {Internationales Begegnungs- und Forschungszentrum f{\"u}r Informatik (IBFI)}, YEAR = {2007}, DATE = {2007}, BOOKTITLE = {Visual Computing -- Convergence of Computer Graphics and Computer Vision, Dagstuhl Seminar Proceedings, Volume 7171}, PAGES = {1--18}, SERIES = {Dagstuhl Seminar Proceedings}, VOLUME = {7171}, ADDRESS = {Schloss Dagstuhl, Wadern, Germany}, }
Endnote
%0 Conference Proceedings %A Gross, Markus %A M&#252;ller, Heinrich %A Seidel, Hans-Peter %A Shum, Harry %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T 07171 Abstracts Collection &#8211; Visual Computing &#8211; Convergence of Computer Graphics and Computer Vision : %G eng %U http://hdl.handle.net/21.11116/0000-000F-4B81-B %R 10.4230/DagSemProc.07171.1 %D 2007 %B Visual Computing - Convergence of Computer Graphics and Computer Vision %Z date of event: 2007-04-22 - 2007-04-27 %C Schloss Dagstuhl, Wadern, Germany %B Visual Computing - Convergence of Computer Graphics and Computer Vision, Dagstuhl Seminar Proceedings, Volume 7171 %P 1 - 18 %I Internationales Begegnungs- und Forschungszentrum f&#252;r Informatik (IBFI) %B Dagstuhl Seminar Proceedings %N 7171 %@ false
Gross, M., Müller, H., Seidel, H.-P., and Shum, H. 2007b. 07171 Summary – Visual Computing – Convergence of Computer Graphics and Computer Vision. Visual Computing - Convergence of Computer Graphics and Computer Vision, Dagstuhl Seminar Proceedings, Volume 7171, Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI).
Export
BibTeX
@inproceedings{DBLP:conf/dagstuhl/GrossMSS07, TITLE = {07171 Summary -- Visual Computing -- Convergence of Computer Graphics and Computer Vision}, AUTHOR = {Gross, Markus and M{\"u}ller, Heinrich and Seidel, Hans-Peter and Shum, Harry}, LANGUAGE = {eng}, ISSN = {1862-4405}, DOI = {10.4230/DagSemProc.07171.2}, PUBLISHER = {Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI)}, YEAR = {2007}, DATE = {2007}, BOOKTITLE = {Visual Computing -- Convergence of Computer Graphics and Computer Vision, Dagstuhl Seminar Proceedings, Volume 7171}, PAGES = {1--4}, SERIES = {Dagstuhl Seminar Proceedings}, VOLUME = {07171}, ADDRESS = {Schloss Dagstuhl, Wadern, Germany}, }
Endnote
%0 Conference Proceedings %A Gross, Markus %A M&#252;ller, Heinrich %A Seidel, Hans-Peter %A Shum, Harry %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T 07171 Summary &#8211; Visual Computing &#8211; Convergence of Computer Graphics and Computer Vision : %G eng %U http://hdl.handle.net/21.11116/0000-000F-4B83-9 %R 10.4230/DagSemProc.07171.2 %D 2007 %B Visual Computing - Convergence of Computer Graphics and Computer Vision %Z date of event: 2007-04-22 - 2007-04-27 %C Schloss Dagstuhl, Wadern, Germany %B Visual Computing - Convergence of Computer Graphics and Computer Vision, Dagstuhl Seminar Proceedings, Volume 7171 %P 1 - 4 %I Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI) %B Dagstuhl Seminar Proceedings %N 07171 %@ false
Gall, J., Brox, T., Rosenhahn, B., and Seidel, H.-P. 2007a. Global stochastic optimization for robust and accurate human motion capture. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Tracking of human motion in video is usually tackled either by local optimization or filtering approaches. While local optimization offers accurate estimates but often loses track due to local optima, particle filtering can recover from errors at the expense of a poor accuracy due to overestimation of noise. In this paper, we propose to embed global stochastic optimization in a tracking framework. This new optimization technique exhibits both the robustness of filtering strategies and a remarkable accuracy. We apply the optimization to an energy function that relies on silhouettes and color, as well as some prior information on physical constraints. This framework provides a general solution to markerless human motion capture since neither excessive preprocessing nor strong assumptions except of a 3D model are required. The optimization provides initialization and accurate tracking even in case of low contrast and challenging illumination. Our experimental evaluation demonstrates the large improvements obtained with this technique. It comprises a quantitative error analysis comparing the approach with local optimization, particle filtering, and a heuristic based on particle filtering.
Export
BibTeX
@techreport{GallBroxRosenhahnSeidel2008, TITLE = {Global stochastic optimization for robust and accurate human motion capture}, AUTHOR = {Gall, J{\"u}rgen and Brox, Thomas and Rosenhahn, Bodo and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-008}, NUMBER = {MPI-I-2007-4-008}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Tracking of human motion in video is usually tackled either by local optimization or filtering approaches. While local optimization offers accurate estimates but often looses track due to local optima, particle filtering can recover from errors at the expense of a poor accuracy due to overestimation of noise. In this paper, we propose to embed global stochastic optimization in a tracking framework. This new optimization technique exhibits both the robustness of filtering strategies and a remarkable accuracy. We apply the optimization to an energy function that relies on silhouettes and color, as well as some prior information on physical constraints. This framework provides a general solution to markerless human motion capture since neither excessive preprocessing nor strong assumptions except of a 3D model are required. The optimization provides initialization and accurate tracking even in case of low contrast and challenging illumination. Our experimental evaluation demonstrates the large improvements obtained with this technique. It comprises a quantitative error analysis comparing the approach with local optimization, particle filtering, and a heuristic based on particle filtering.}, TYPE = {Research Report / Max-Planck-Institut f&#252;r Informatik}, }
Endnote
%0 Report %A Gall, J&#252;rgen %A Brox, Thomas %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Global stochastic optimization for robust and accurate human motion capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66CE-7 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-008 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %P 28 p. %X Tracking of human motion in video is usually tackled either by local optimization or filtering approaches. While local optimization offers accurate estimates but often looses track due to local optima, particle filtering can recover from errors at the expense of a poor accuracy due to overestimation of noise. In this paper, we propose to embed global stochastic optimization in a tracking framework. This new optimization technique exhibits both the robustness of filtering strategies and a remarkable accuracy. We apply the optimization to an energy function that relies on silhouettes and color, as well as some prior information on physical constraints. This framework provides a general solution to markerless human motion capture since neither excessive preprocessing nor strong assumptions except of a 3D model are required. The optimization provides initialization and accurate tracking even in case of low contrast and challenging illumination. Our experimental evaluation demonstrates the large improvements obtained with this technique. It comprises a quantitative error analysis comparing the approach with local optimization, particle filtering, and a heuristic based on particle filtering. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Gall, J., Rosenhahn, B., and Seidel, H.-P. 2007b. Clustered Stochastic Optimization for Object Recognition and Pose Estimation. Pattern Recognition, Springer.
Abstract
We present an approach for estimating the 3D position and in case of articulated objects also the joint configuration from segmented 2D images. The pose estimation without initial information is a challenging optimization problem in a high dimensional space and is essential for texture acquisition and initialization of model-based tracking algorithms. Our method is able to recognize the correct object in the case of multiple objects and estimates its pose with a high accuracy. The key component is a particle-based global optimization method that converges to the global minimum similar to simulated annealing. After detecting potential bounded subsets of the search space, the particles are divided into clusters and migrate to the most attractive cluster as the time increases. The performance of our approach is verified by means of real scenes and a quantitative error analysis for image distortions. Our experiments include rigid bodies and full human bodies.
Export
BibTeX
@inproceedings{Gall-et-al_DAGM07, TITLE = {Clustered Stochastic Optimization for Object Recognition and Pose Estimation}, AUTHOR = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-540-74933-2}, DOI = {10.1007/978-3-540-74936-3_4}, LOCALID = {Local-ID: C12573CC004A8E26-B9542DAFAE69A07BC125736000410E0A-Gall2007a}, PUBLISHER = {Springer}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {We present an approach for estimating the 3D position and<br>in case of articulated objects also the joint configuration from segmented<br>2D images. The pose estimation without initial information is a challenging<br>optimization problem in a high dimensional space and is essential for<br>texture acquisition and initialization of model-based tracking algorithms.<br>Our method is able to recognize the correct object in the case of multiple<br>objects and estimates its pose with a high accuracy. The key component<br>is a particle-based global optimization method that converges to the<br>global minimum similar to simulated annealing. After detecting potential<br>bounded subsets of the search space, the particles are divided into<br>clusters and migrate to the most attractive cluster as the time increases.<br>The performance of our approach is verified by means of real scenes and a<br>quantative error analysis for image distortions. Our experiments include<br>rigid bodies and full human bodies.}, BOOKTITLE = {Pattern Recognition}, EDITOR = {Hamprecht, Fred A. and Schn{\"o}rr, Christoph and J{\"a}hne, Bernd}, PAGES = {32--41}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {4713}, ADDRESS = {Heidelberg, Germany}, }
Endnote
%0 Conference Proceedings %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Clustered Stochastic Optimization for Object Recognition and Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E83-A %F EDOC: 356542 %R 10.1007/978-3-540-74936-3_4 %F OTHER: Local-ID: C12573CC004A8E26-B9542DAFAE69A07BC125736000410E0A-Gall2007a %D 2007 %B 29th DAGM Symposium on Pattern Recognition %Z date of event: 2007-09-12 - 2007-09-14 %C Heidelberg, Germany %X We present an approach for estimating the 3D position and<br>in case of articulated objects also the joint configuration from segmented<br>2D images. The pose estimation without initial information is a challenging<br>optimization problem in a high dimensional space and is essential for<br>texture acquisition and initialization of model-based tracking algorithms.<br>Our method is able to recognize the correct object in the case of multiple<br>objects and estimates its pose with a high accuracy. The key component<br>is a particle-based global optimization method that converges to the<br>global minimum similar to simulated annealing. After detecting potential<br>bounded subsets of the search space, the particles are divided into<br>clusters and migrate to the most attractive cluster as the time increases.<br>The performance of our approach is verified by means of real scenes and a<br>quantative error analysis for image distortions. Our experiments include<br>rigid bodies and full human bodies. %B Pattern Recognition %E Hamprecht, Fred A.; Schn&#246;rr, Christoph; J&#228;hne, Bernd %P 32 - 41 %I Springer %@ 978-3-540-74933-2 %B Lecture Notes in Computer Science %N 4713 %U https://rdcu.be/dIMTs
Gall, J., Potthoff, J., Schnörr, C., Rosenhahn, B., and Seidel, H.-P. 2007c. Interacting and Annealing Particle Filters: Mathematics and a Recipe for Applications. Journal of Mathematical Imaging and Vision28, 1.
Abstract
Interacting and annealing are two powerful strategies that are applied in different areas of stochastic modelling and data analysis. Interacting particle systems approximate a distribution of interest by a finite number of particles where the particles interact between the time steps. In computer vision, they are commonly known as particle filters. Simulated annealing, on the other hand, is a global optimization method derived from statistical mechanics. A recent heuristic approach to fuse these two techniques for motion capturing has become known as annealed particle filter. In order to analyze these techniques, we rigorously derive in this paper two algorithms with annealing properties based on the mathematical theory of interacting particle systems. Convergence results and sufficient parameter restrictions enable us to point out limitations of the annealed particle filter. Moreover, we evaluate the impact of the parameters on the performance in various experiments, including the tracking of articulated bodies from noisy measurements. Our results provide a general guidance on suitable parameter choices for different applications.
Export
BibTeX
@article{Gall-et-al_JMIV07, TITLE = {Interacting and Annealing Particle Filters: Mathematics and a Recipe for Applications}, AUTHOR = {Gall, J{\"u}rgen and Potthoff, J{\"u}rgen and Schn{\"o}rr, Christoph and Rosenhahn, Bodo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0924-9907}, DOI = {10.1007/s10851-007-0007-8}, LOCALID = {Local-ID: C12573CC004A8E26-F1BBDA1CDFE550C4C1257291004608C1-Gall2007az}, PUBLISHER = {Kluwer Academic Publishers}, ADDRESS = {Dordrecht, Holland}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Interacting and annealing are two powerful strategies<br>that are applied in different areas of stochastic modelling<br>and data analysis. Interacting particle systems approximate<br>a distribution of interest by a finite number of particles<br>where the particles interact between the time steps. In computer<br>vision, they are commonly known as particle filters.<br>Simulated annealing, on the other hand, is a global optimization<br>method derived from statistical mechanics. A recent<br>heuristic approach to fuse these two techniques for motion<br>capturing has become known as annealed particle filter.<br>In order to analyze these techniques, we rigorously derive in<br>this paper two algorithms with annealing properties based<br>on the mathematical theory of interacting particle systems.<br>Convergence results and sufficient parameter restrictions enable<br>us to point out limitations of the annealed particle filter.<br>Moreover, we evaluate the impact of the parameters on the<br>performance in various experiments, including the tracking<br>of articulated bodies from noisy measurements. Our results<br>provide a general guidance on suitable parameter choices for<br>different applications.}, JOURNAL = {Journal of Mathematical Imaging and Vision}, VOLUME = {28}, NUMBER = {1}, PAGES = {1--18}, }
Endnote
%0 Journal Article %A Gall, J&#252;rgen %A Potthoff, J&#252;rgen %A Schn&#246;rr, Christoph %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interacting and Annealing Particle Filters: Mathematics and a Recipe for Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F9A-0 %F EDOC: 356606 %R 10.1007/s10851-007-0007-8 %F OTHER: Local-ID: C12573CC004A8E26-F1BBDA1CDFE550C4C1257291004608C1-Gall2007az %D 2007 %* Review method: peer-reviewed %X Interacting and annealing are two powerful strategies<br>that are applied in different areas of stochastic modelling<br>and data analysis. Interacting particle systems approximate<br>a distribution of interest by a finite number of particles<br>where the particles interact between the time steps. In computer<br>vision, they are commonly known as particle filters.<br>Simulated annealing, on the other hand, is a global optimization<br>method derived from statistical mechanics. A recent<br>heuristic approach to fuse these two techniques for motion<br>capturing has become known as annealed particle filter.<br>In order to analyze these techniques, we rigorously derive in<br>this paper two algorithms with annealing properties based<br>on the mathematical theory of interacting particle systems.<br>Convergence results and sufficient parameter restrictions enable<br>us to point out limitations of the annealed particle filter.<br>Moreover, we evaluate the impact of the parameters on the<br>performance in various experiments, including the tracking<br>of articulated bodies from noisy measurements. Our results<br>provide a general guidance on suitable parameter choices for<br>different applications. 
%J Journal of Mathematical Imaging and Vision %V 28 %N 1 %& 1 %P 1 - 18 %I Kluwer Academic Publishers %C Dordrecht, Holland %@ false %U https://rdcu.be/dISsg
Gall, J., Rosenhahn, B., and Seidel, H.-P. 2007d. Clustered stochastic optimization for object recognition and pose estimation. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present an approach for estimating the 3D position and in case of articulated objects also the joint configuration from segmented 2D images. The pose estimation without initial information is a challenging optimization problem in a high dimensional space and is essential for texture acquisition and initialization of model-based tracking algorithms. Our method is able to recognize the correct object in the case of multiple objects and estimates its pose with a high accuracy. The key component is a particle-based global optimization method that converges to the global minimum similar to simulated annealing. After detecting potential bounded subsets of the search space, the particles are divided into clusters and migrate to the most attractive cluster as the time increases. The performance of our approach is verified by means of real scenes and a quantitative error analysis for image distortions. Our experiments include rigid bodies and full human bodies.
Export
BibTeX
@techreport{GallRosenhahnSeidel2007,
  TITLE       = {Clustered stochastic optimization for object recognition and pose estimation},
  AUTHOR      = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-001},
  NUMBER      = {MPI-I-2007-4-001},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {We present an approach for estimating the 3D position and in case of articulated objects also the joint configuration from segmented 2D images. The pose estimation without initial information is a challenging optimization problem in a high dimensional space and is essential for texture acquisition and initialization of model-based tracking algorithms. Our method is able to recognize the correct object in the case of multiple objects and estimates its pose with a high accuracy. The key component is a particle-based global optimization method that converges to the global minimum similar to simulated annealing. After detecting potential bounded subsets of the search space, the particles are divided into clusters and migrate to the most attractive cluster as the time increases. The performance of our approach is verified by means of real scenes and a quantitative error analysis for image distortions. Our experiments include rigid bodies and full human bodies.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Clustered stochastic optimization for object recognition and pose estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66E5-2 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-001 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %P 23 p. %X We present an approach for estimating the 3D position and in case of articulated objects also the joint configuration from segmented 2D images. The pose estimation without initial information is a challenging optimization problem in a high dimensional space and is essential for texture acquisition and initialization of model-based tracking algorithms. Our method is able to recognize the correct object in the case of multiple objects and estimates its pose with a high accuracy. The key component is a particle-based global optimization method that converges to the global minimum similar to simulated annealing. After detecting potential bounded subsets of the search space, the particles are divided into clusters and migrate to the most attractive cluster as the time increases. The performance of our approach is verified by means of real scenes and a quantative error analysis for image distortions. Our experiments include rigid bodies and full human bodies. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Gall, J., Potthoff, J., Schnörr, C., Rosenhahn, B., and Seidel, H.-P. 2007e. Interacting and Annealing Particle Filters: Mathematics and a Recipe for Applications. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Interacting and annealing are two powerful strategies that are applied in different areas of stochastic modelling and data analysis. Interacting particle systems approximate a distribution of interest by a finite number of particles where the particles interact between the time steps. In computer vision, they are commonly known as particle filters. Simulated annealing, on the other hand, is a global optimization method derived from statistical mechanics. A recent heuristic approach to fuse these two techniques for motion capturing has become known as annealed particle filter. In order to analyze these techniques, we rigorously derive in this paper two algorithms with annealing properties based on the mathematical theory of interacting particle systems. Convergence results and sufficient parameter restrictions enable us to point out limitations of the annealed particle filter. Moreover, we evaluate the impact of the parameters on the performance in various experiments, including the tracking of articulated bodies from noisy measurements. Our results provide a general guidance on suitable parameter choices for different applications.
Export
BibTeX
@techreport{GallPotthoffRosenhahnSchnoerrSeidel2006,
  TITLE       = {Interacting and Annealing Particle Filters: Mathematics and a Recipe for Applications},
  AUTHOR      = {Gall, J{\"u}rgen and Potthoff, J{\"u}rgen and Schn{\"o}rr, Christoph and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2006-4-009},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {Interacting and annealing are two powerful strategies that are applied in different areas of stochastic modelling and data analysis. Interacting particle systems approximate a distribution of interest by a finite number of particles where the particles interact between the time steps. In computer vision, they are commonly known as particle filters. Simulated annealing, on the other hand, is a global optimization method derived from statistical mechanics. A recent heuristic approach to fuse these two techniques for motion capturing has become known as annealed particle filter. In order to analyze these techniques, we rigorously derive in this paper two algorithms with annealing properties based on the mathematical theory of interacting particle systems. Convergence results and sufficient parameter restrictions enable us to point out limitations of the annealed particle filter. Moreover, we evaluate the impact of the parameters on the performance in various experiments, including the tracking of articulated bodies from noisy measurements. Our results provide a general guidance on suitable parameter choices for different applications.},
  TYPE        = {Research Report},
}
Endnote
%0 Report %A Gall, J&#252;rgen %A Potthoff, J&#252;rgen %A Schn&#246;rr, Christoph %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interacting and Annealing Particle Filters: Mathematics and a Recipe for Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-13C7-D %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %Z Review method: peer-reviewed %X Interacting and annealing are two powerful strategies that are applied in different areas of stochastic modelling and data analysis. Interacting particle systems approximate a distribution of interest by a finite number of particles where the particles interact between the time steps. In computer vision, they are commonly known as particle filters. Simulated annealing, on the other hand, is a global optimization method derived from statistical mechanics. A recent heuristic approach to fuse these two techniques for motion capturing has become known as annealed particle filter. In order to analyze these techniques, we rigorously derive in this paper two algorithms with annealing properties based on the mathematical theory of interacting particle systems. Convergence results and sufficient parameter restrictions enable us to point out limitations of the annealed particle filter. Moreover, we evaluate the impact of the parameters on the performance in various experiments, including the tracking of articulated bodies from noisy measurements. Our results provide a general guidance on suitable parameter choices for different applications. %B Research Report
Fuchs, M., Lensch, H.P.A., Blanz, V., and Seidel, H.-P. 2007a. Superresolution Reflectance Fields: Synthesizing Images for Intermediate Light Directions. Computer Graphics Forum, Blackwell.
Abstract
Captured reflectance fields tend to provide a relatively coarse sampling of the incident light directions. As a result, sharp illumination features, such as highlights or shadow boundaries, are poorly reconstructed during relighting; highlights are disconnected, and shadows show banding artefacts. In this paper, we propose a novel interpolation technique for 4D reflectance fields that reconstructs plausible images even for non-observed light directions. Given a sparsely sampled reflectance field, we can effectively synthesize images as they would have been obtained from denser sampling. The processing pipeline consists of three steps: (1) segmentation of regions where intermediate lighting cannot be obtained by blending, (2) appropriate flow algorithms for highlights and shadows, plus (3) a final reconstruction technique that uses image-based priors to faithfully correct errors that might be introduced by the segmentation or flow step. The algorithm reliably reproduces scenes that contain specular highlights, interreflections, shadows or caustics.
Export
BibTeX
@inproceedings{Fuchs-et-al_Eurographics07,
  TITLE     = {Superresolution Reflectance Fields: Synthesizing Images for Intermediate Light Directions},
  AUTHOR    = {Fuchs, Martin and Lensch, Hendrik P. A. and Blanz, Volker and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2007.01067.x},
  LOCALID   = {Local-ID: C12573CC004A8E26-4C7DB5B41C6B3F27C125736400511B5A-Fuchs2007b},
  PUBLISHER = {Blackwell},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Captured reflectance fields tend to provide a relatively coarse sampling of the incident light directions. As a result, sharp illumination features, such as highlights or shadow boundaries, are poorly reconstructed during relighting; highlights are disconnected, and shadows show banding artefacts. In this paper, we propose a novel interpolation technique for 4D reflectance fields that reconstructs plausible images even for non-observed light directions. Given a sparsely sampled reflectance field, we can effectively synthesize images as they would have been obtained from denser sampling. The processing pipeline consists of three steps: (1) segmentation of regions where intermediate lighting cannot be obtained by blending, (2) appropriate flow algorithms for highlights and shadows, plus (3) a final reconstruction technique that uses image-based priors to faithfully correct errors that might be introduced by the segmentation or flow step. The algorithm reliably reproduces scenes that contain specular highlights, interreflections, shadows or caustics.},
  BOOKTITLE = {Eurographics 2007},
  EDITOR    = {Cohen-Or, Daniel and Slav{\'i}k, Pavel},
  PAGES     = {447--456},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {26},
  ISSUE     = {3},
  ADDRESS   = {Prague, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Fuchs, Martin %A Lensch, Hendrik P. A. %A Blanz, Volker %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Superresolution Reflectance Fields: Synthesizing Images for Intermediate Light Directions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20DC-4 %F EDOC: 356535 %R 10.1111/j.1467-8659.2007.01067.x %F OTHER: Local-ID: C12573CC004A8E26-4C7DB5B41C6B3F27C125736400511B5A-Fuchs2007b %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X Captured reflectance fields tend to provide a relatively coarse sampling of the <br>incident light directions. As a result, sharp illumination features, such as <br>highlights or shadow boundaries, are poorly reconstructed during relighting; <br>highlights are disconnected, and shadows show banding artefacts. In this paper, <br>we propose a novel<br>interpolation technique for 4D reflectance fields that reconstructs plausible <br>images even for non-observed light directions. Given a sparsely sampled <br>reflectance field, we can effectively synthesize images as they would have been <br>obtained from denser sampling. The processing pipeline consists of three steps: <br>(1) segmentation of regions where intermediate lighting cannot be obtained by <br>blending, (2) appropriate flow algorithms for highlights and shadows, plus (3) <br>a final reconstruction technique that uses image-based priors to faithfully <br>correct errors that<br>might be introduced by the segmentation or flow step. The algorithm reliably <br>reproduces scenes that contain specular highlights, interreflections, shadows <br>or caustics. 
%B Eurographics 2007 %E Cohen-Or, Daniel; Slav&#237;k, Pavel %P 447 - 456 %I Blackwell %J Computer Graphics Forum %V 26 %N 3 %I Blackwell-Wiley %@ false
Fuchs, M., Blanz, V., Lensch, H.P.A., and Seidel, H.-P. 2007b. Adaptive Sampling of Reflectance Fields. ACM Transactions on Graphics 26, 2.
Abstract
Image-based relighting achieves high quality in rendering, but it requires a large number of measurements of the reflectance field. This article discusses sampling techniques that improve on the trade-offs between measurement effort and reconstruction quality. Specifically, we (i) demonstrate that sampling with point lights and from a sparse set of incoming light directions creates artifacts which can be reduced significantly by employing extended light sources for sampling, (ii) propose a sampling algorithm which incrementally chooses light directions adapted to the properties of the reflectance field being measured, thus capturing significant features faster than fixed-pattern sampling, and (iii) combine reflectance fields from two different light domain resolutions. We present an automated measurement setup for well-defined angular distributions of the incident, indirect illumination. It is based on programmable spotlights with controlled aperture that illuminate the walls around the scene.
Export
BibTeX
@article{Fuchs-et-al_TG07,
  TITLE     = {Adaptive Sampling of Reflectance Fields},
  AUTHOR    = {Fuchs, Martin and Blanz, Volker and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1243980.1243984},
  LOCALID   = {Local-ID: C12573CC004A8E26-137EDB39E9A2266BC1257364004C99E6-Fuchs2007a},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Image-based relighting achieves high quality in rendering, but it requires a large number of measurements of the reflectance field. This article discusses sampling techniques that improve on the trade-offs between measurement effort and reconstruction quality. Specifically, we (i) demonstrate that sampling with point lights and from a sparse set of incoming light directions creates artifacts which can be reduced significantly by employing extended light sources for sampling, (ii) propose a sampling algorithm which incrementally chooses light directions adapted to the properties of the reflectance field being measured, thus capturing significant features faster than fixed-pattern sampling, and (iii) combine reflectance fields from two different light domain resolutions. We present an automated measurement setup for well-defined angular distributions of the incident, indirect illumination. It is based on programmable spotlights with controlled aperture that illuminate the walls around the scene.},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {26},
  NUMBER    = {2},
  PAGES     = {10.1--18},
}
Endnote
%0 Journal Article %A Fuchs, Martin %A Blanz, Volker %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive Sampling of Reflectance Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1DDC-C %F EDOC: 356536 %R 10.1145/1243980.1243984 %F OTHER: Local-ID: C12573CC004A8E26-137EDB39E9A2266BC1257364004C99E6-Fuchs2007a %D 2007 %* Review method: peer-reviewed %X Image-based relighting achieves high quality in rendering, but it requires a <br>large number of measurements of the reflectance field. This article discusses <br>sampling techniques that improve on the trade-offs between measurement effort <br>and reconstruction quality.<br><br>Specifically, we (i) demonstrate that sampling with point lights and from a <br>sparse set of incoming light directions creates artifacts which can be reduced <br>significantly by employing extended light sources for sampling, (ii) propose a <br>sampling algorithm which incrementally chooses light directions adapted to the <br>properties of the reflectance field being measured, thus capturing significant <br>features faster than fixed-pattern sampling, and (iii) combine reflectance <br>fields<br>from two different light domain resolutions.<br><br>We present an automated measurement setup for well-defined angular <br>distributions of the incident, indirect illumination. It is based on <br>programmable spotlights with controlled aperture that illuminate the walls <br>around the scene. %J ACM Transactions on Graphics %V 26 %N 2 %& 10.1 %P 10.1 - 18 %I Association for Computing Machinery %C New York, NY %@ false
Fuchs, C., Chen, T., Goesele, M., Theisel, H., and Seidel, H.-P. 2007c. Density Estimation for Dynamic Volumes. Computers and Graphics 31, 2.
Export
BibTeX
@article{Fuchs-et-al_CG07,
  TITLE     = {Density Estimation for Dynamic Volumes},
  AUTHOR    = {Fuchs, Christian and Chen, Tongbo and Goesele, Michael and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2006.11.014},
  LOCALID   = {Local-ID: C12573CC004A8E26-FDE6F49D5C0AE8C5C12573DF00390447-Fuchs:2007:DEF},
  PUBLISHER = {Pergamon},
  ADDRESS   = {New York},
  YEAR      = {2007},
  DATE      = {2007},
  JOURNAL   = {Computers and Graphics},
  VOLUME    = {31},
  NUMBER    = {2},
  PAGES     = {205--211},
}
Endnote
%0 Journal Article %A Fuchs, Christian %A Chen, Tongbo %A Goesele, Michael %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Density Estimation for Dynamic Volumes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1ED6-E %F EDOC: 356508 %R 10.1016/j.cag.2006.11.014 %F OTHER: Local-ID: C12573CC004A8E26-FDE6F49D5C0AE8C5C12573DF00390447-Fuchs:2007:DEF %D 2007 %* Review method: peer-reviewed %J Computers and Graphics %V 31 %N 2 %& 205 %P 205 - 211 %I Pergamon %C New York %@ false
Dyken, C., Ziegler, G., Theobalt, C., and Seidel, H.-P. 2007. HistoPyramids in Iso-Surface Extraction. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present an implementation approach to high-speed Marching Cubes, running entirely on the Graphics Processing Unit of Shader Model 3.0 and 4.0 graphics hardware. Our approach is based on the interpretation of Marching Cubes as a stream compaction and expansion process, and is implemented using the HistoPyramid, a hierarchical data structure previously only used in GPU data compaction. We extend the HistoPyramid structure to allow for stream expansion, which provides an efficient method for generating geometry directly on the GPU, even on Shader Model 3.0 hardware. Currently, our algorithm outperforms all other known GPU-based iso-surface extraction algorithms. We describe our implementation and present a performance analysis on several generations of graphics hardware.
Export
BibTeX
@techreport{DykenZieglerTheobaltSeidel2007,
  TITLE       = {Histo{P}yramids in Iso-Surface Extraction},
  AUTHOR      = {Dyken, Christopher and Ziegler, Gernot and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-006},
  NUMBER      = {MPI-I-2007-4-006},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {We present an implementation approach to high-speed Marching Cubes, running entirely on the Graphics Processing Unit of Shader Model 3.0 and 4.0 graphics hardware. Our approach is based on the interpretation of Marching Cubes as a stream compaction and expansion process, and is implemented using the HistoPyramid, a hierarchical data structure previously only used in GPU data compaction. We extend the HistoPyramid structure to allow for stream expansion, which provides an efficient method for generating geometry directly on the GPU, even on Shader Model 3.0 hardware. Currently, our algorithm outperforms all other known GPU-based iso-surface extraction algorithms. We describe our implementation and present a performance analysis on several generations of graphics hardware.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Dyken, Christopher %A Ziegler, Gernot %A Theobalt, Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T HistoPyramids in Iso-Surface Extraction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66D3-A %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-006 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %P 16 p. %X We present an implementation approach to high-speed Marching Cubes, running entirely on the Graphics Processing Unit of Shader Model 3.0 and 4.0 graphics hardware. Our approach is based on the interpretation of Marching Cubes as a stream compaction and expansion process, and is implemented using the HistoPyramid, a hierarchical data structure previously only used in GPU data compaction. We extend the HistoPyramid structure to allow for stream expansion, which provides an efficient method for generating geometry directly on the GPU, even on Shader Model 3.0 hardware. Currently, our algorithm outperforms all other known GPU-based iso-surface extraction algorithms. We describe our implementation and present a performance analysis on several generations of graphics hardware. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Dong, Z., Kautz, J., Theobalt, C., and Seidel, H.-P. 2007. Interactive Global Illumination Using Implicit Visibility. Pacific Graphics 2007 (PG 2007), IEEE Computer Society.
Export
BibTeX
@inproceedings{DongImVis2007,
  TITLE     = {Interactive Global Illumination Using Implicit Visibility},
  AUTHOR    = {Dong, Zhao and Kautz, Jan and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-3009-5},
  DOI       = {10.1109/PG.2007.37},
  LOCALID   = {Local-ID: C12573CC004A8E26-08A263C42E568773C125731A00525FB3-DongImVis2007},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Pacific Graphics 2007 (PG 2007)},
  EDITOR    = {Alexa, Marc and Gortler, Steven and Ju, Tao},
  PAGES     = {77--86},
  ADDRESS   = {Maui, HI, USA},
}
Endnote
%0 Conference Proceedings %A Dong, Zhao %A Kautz, Jan %A Theobalt, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Global Illumination Using Implicit Visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F9E-8 %F EDOC: 356543 %R 10.1109/PG.2007.37 %F OTHER: Local-ID: C12573CC004A8E26-08A263C42E568773C125731A00525FB3-DongImVis2007 %D 2007 %B 15th Pacific Conference on Computer Graphics and Applications %Z date of event: 2007-10-29 - 2007-11-02 %C Maui, HI, USA %B Pacific Graphics 2007 %E Alexa, Marc; Gortler, Steven; Ju, Tao %P 77 - 86 %I IEEE Computer Society %@ 0-7695-3009-5
De Aguiar, E., Zayer, R., Theobalt, C., Magnor, M., and Seidel, H.-P. 2007a. Video-driven animation of human body scans. IEEE 3DTV Conference, IEEE.
Abstract
We present a versatile, fast and simple framework to generate animations of scanned human characters from input multiview video sequences. Our method is purely mesh-based and requires only a minimum of manual interaction. The proposed algorithm implicitly generates realistic body deformations and can easily transfer motions between human subjects of completely different shape and proportions. We feature a working prototype system that demonstrates that our method can generate convincing lifelike character animations from marker-less optical motion capture data.
Export
BibTeX
@inproceedings{deAguiar3DTVCON2007,
  TITLE     = {Video-driven animation of human body scans},
  AUTHOR    = {de Aguiar, Edilson and Zayer, Rhaleb and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-0721-7},
  DOI       = {10.1109/3DTV.2007.4379409},
  LOCALID   = {Local-ID: C12573CC004A8E26-0B2C455FFF21EA27C1257298005039DF-deAguiar3DTVCON2007},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a versatile, fast and simple framework to generate animations of scanned human characters from input multiview video sequences. Our method is purely mesh-based and requires only a minimum of manual interaction. The proposed algorithm implicitly generates realistic body deformations and can easily transfer motions between human subjects of completely different shape and proportions. We feature a working prototype system that demonstrates that our method can generate convincing lifelike character animations from marker-less optical motion capture data.},
  BOOKTITLE = {IEEE 3DTV Conference},
  EDITOR    = {Triantafyllidis, Georgios and Onural, Levent},
  PAGES     = {4379409.1--4},
  ADDRESS   = {Kos Island, Greece},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Zayer, Rhaleb %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video-driven animation of human body scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-212F-F %F EDOC: 356600 %R 10.1109/3DTV.2007.4379409 %F OTHER: Local-ID: C12573CC004A8E26-0B2C455FFF21EA27C1257298005039DF-deAguiar3DTVCON2007 %I IEEE %D 2007 %B Untitled Event %Z date of event: 2007-05-07 - 2007-05-09 %C Kos Island, Greece %X We present a versatile, fast and simple framework to generate animations of scanned human characters from input multiview video sequences. Our method is purely mesh-based and requires only a minimum of manual interaction. The proposed algorithm implicitly generates realistic body deformations and can easily transfer motions between human subjects of completely different shape and proportions. We feature a working prototype system that demonstrates that our method can generate convincing lifelike character animations from marker-less optical motion capture data. %B IEEE 3DTV Conference %E Triantafyllidis, Georgios; Onural, Levent %P 4379409.1 - 4 %I IEEE %@ 978-1-4244-0721-7
De Aguiar, E., Zayer, R., Theobalt, C., Magnor, M., and Seidel, H.-P. 2007b. A Simple Framework for Natural Animation of Digitized Models. SIBGRAPI’07 - XX Brazilian Symposium on Computer Graphics and Image Processing, IEEE.
Abstract
We present a versatile, fast and simple framework to generate animations of scanned human characters from input optical motion capture data. Our method is purely mesh-based and requires only a minimum of manual interaction. The only manual step needed to create moving virtual people is the placement of a sparse set of correspondences between the input data and the mesh to be animated. The proposed algorithm implicitly generates realistic body deformations, and can easily transfer motions between human subjects of completely different shape and proportions. We feature a working prototype system that demonstrates that our method can generate convincing lifelike character animations directly from optical motion capture data.
Export
BibTeX
@inproceedings{Zayer-et-al_SIBGRAPI07,
  TITLE     = {A Simple Framework for Natural Animation of Digitized Models},
  AUTHOR    = {de Aguiar, Edilson and Zayer, Rhaleb and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-7695-2996-7},
  DOI       = {10.1109/SIBGRAPI.2007.14},
  LOCALID   = {Local-ID: C12573CC004A8E26-70AD657405595382C12573B6000B8818-deAguiar2007_SIBGRAPI},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a versatile, fast and simple framework to generate animations of scanned human characters from input optical motion capture data. Our method is purely mesh-based and requires only a minimum of manual interaction. The only manual step needed to create moving virtual people is the placement of a sparse set of correspondences between the input data and the mesh to be animated. The proposed algorithm implicitly generates realistic body deformations, and can easily transfer motions between human subjects of completely different shape and proportions. We feature a working prototype system that demonstrates that our method can generate convincing lifelike character animations directly from optical motion capture data.},
  BOOKTITLE = {SIBGRAPI'07 -- XX Brazilian Symposium on Computer Graphics and Image Processing},
  PAGES     = {3--10},
  ADDRESS   = {Belo Horizonte, Brazil},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Zayer, Rhaleb %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Simple Framework for Natural Animation of Digitized Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E3B-C %F EDOC: 356520 %R 10.1109/SIBGRAPI.2007.14 %F OTHER: Local-ID: C12573CC004A8E26-70AD657405595382C12573B6000B8818-deAguiar2007_SIBGRAPI %D 2007 %B XX Brazilian Symposium on Computer Graphics and Image Processing %Z date of event: 2007-10-07 - 2007-10-10 %C Belo Horizonte, Brazil %X We present a versatile, fast and simple framework to generate animations of <br>scanned human characters from input optical motion capture data. Our method is <br>purely meshbased and requires only a minimum of manual interaction. The only <br>manual step needed to create moving virtual people is the placement of a sparse <br>set of correspondences between the input data and the mesh to be animated. The <br>proposed algorithm implicitly generates realistic body deformations, and can <br>easily transfer motions between human<br>subjects of completely different shape and proportions. We feature a working <br>prototype system that demonstrates that our method can generate convincing <br>lifelike character animations directly from optical motion capture data. %B SIBGRAPI'07 - XX Brazilian Symposium on Computer Graphics and Image Processing %P 3 - 10 %I IEEE %@ 978-0-7695-2996-7
De Aguiar, E., Theobalt, C., Stoll, C., and Seidel, H.-P. 2007c. Rapid Animation of Laser-scanned Humans. 2007 IEEE Virtual Reality Conference (VR 2007), IEEE.
Abstract
We present a simple and efficient approach to turn laser-scanned human geometry into a realistically moving virtual avatar. Instead of relying on the classical skeleton-based animation pipeline, our method uses a mesh-based Laplacian editing scheme to drive the motion of the scanned model. Our framework elegantly solves the motion retargeting problem and produces realistic non-rigid surface deformation with minimal user interaction. Realistic animations can easily be generated from a variety of input motion descriptions, which we exemplify by applying our method to both marker-free and marker-based motion capture data.
Export
BibTeX
@inproceedings{Theobalt-et-al_VR07,
  TITLE     = {Rapid Animation of Laser-scanned Humans},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Stoll, Carsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-4244-0906-3},
  DOI       = {10.1109/VR.2007.352486},
  LOCALID   = {Local-ID: C12573CC004A8E26-2CA6E55723D02E28C12572580057BB71-deAguiar_vr2007},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a simple and efficient approach to turn laser-scanned human geometry into a realistically moving virtual avatar. Instead of relying on the classical skeleton-based animation pipeline, our method uses a mesh-based Laplacian editing scheme to drive the motion of the scanned model. Our framework elegantly solves the motion retargeting problem and produces realistic non-rigid surface deformation with minimal user interaction. Realistic animations can easily be generated from a variety of input motion descriptions, which we exemplify by applying our method to both marker-free and marker-based motion capture data.},
  BOOKTITLE = {2007 IEEE Virtual Reality Conference (VR 2007)},
  EDITOR    = {Sherman, William and Lin, Ming and Steed, Anthony},
  PAGES     = {223--226},
  ADDRESS   = {Charlotte, NC, USA},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Stoll, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Rapid Animation of Laser-scanned Humans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2074-A %F EDOC: 356604 %R 10.1109/VR.2007.352486 %F OTHER: Local-ID: C12573CC004A8E26-2CA6E55723D02E28C12572580057BB71-deAguiar_vr2007 %D 2007 %B 2007 IEEE Virtual Reality Conference %Z date of event: 2007-03-10 - 2007-03-14 %C Charlotte, NC, USA %X We present a simple and ef cient approach to turn laser-scanned human geometry <br>into a realistically moving virtual avatar. Instead of relying on the classical <br>skeleton-based animation pipeline, our method uses a mesh-based Laplacian <br>editing scheme to drive the motion of the scanned model. Our framework <br>elegantly solves the motion retargeting problem and produces realistic <br>non-rigid surface deformation with minimal user interaction. Realistic <br>animations can easily be generated from a variety of input motion descriptions, <br>which we exemplify by applying our method to both marker-free and marker-based <br>motion capture data. %B 2007 IEEE Virtual Reality Conference %E Sherman, William; Lin, Ming; Steed, Anthony %P 223 - 226 %I IEEE %@ 1-4244-0906-3
De Aguiar, E., Theobalt, C., Stoll, C., and Seidel, H.-P. 2007d. Marker-less Deformable Mesh Tracking for Human Shape and Motion Capture. 2007 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2007), IEEE.
Abstract
We present a novel algorithm to jointly capture the motion and the dynamic shape of humans from multiple video streams without using optical markers. Instead of relying on kinematic skeletons, as traditional motion capture methods, our approach uses a deformable high-quality mesh of a human as scene representation. It jointly uses an image-based 3D correspondence estimation algorithm and a fast Laplacian mesh deformation scheme to capture both motion and surface deformation of the actor from the input video footage. As opposed to many related methods, our algorithm can track people wearing wide apparel, it can straightforwardly be applied to any type of subject, e.g. animals, and it preserves the connectivity of the mesh over time. We demonstrate the performance of our approach using synthetic and captured real-world video sequences and validate its accuracy by comparison to the ground truth.
Export
BibTeX
@inproceedings{Theobalt-et-al_CVPR07,
  TITLE     = {Marker-less Deformable Mesh Tracking for Human Shape and Motion Capture},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Stoll, Carsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-4244-1179-3},
  DOI       = {10.1109/CVPR.2007.383296},
  LOCALID   = {Local-ID: C12573CC004A8E26-2B73D31FDAD9D1EFC125729D0055DDAA-deAguiarCVPR2007},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a novel algorithm to jointly capture the motion and the dynamic shape of humans from multiple video streams without using optical markers. Instead of relying on kinematic skeletons, as traditional motion capture methods, our approach uses a deformable high-quality mesh of a human as scene representation. It jointly uses an image-based 3D correspondence estimation algorithm and a fast Laplacian mesh deformation scheme to capture both motion and surface deformation of the actor from the input video footage. As opposed to many related methods, our algorithm can track people wearing wide apparel, it can straightforwardly be applied to any type of subject, e.g. animals, and it preserves the connectivity of the mesh over time. We demonstrate the performance of our approach using synthetic and captured real-world video sequences and validate its accuracy by comparison to the ground truth.},
  BOOKTITLE = {2007 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2007)},
  PAGES     = {2502--2509},
  ADDRESS   = {Minneapolis, MN, USA},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Stoll, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Marker-less Deformable Mesh Tracking for Human Shape and Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FC6-A %F EDOC: 356597 %R 10.1109/CVPR.2007.383296 %F OTHER: Local-ID: C12573CC004A8E26-2B73D31FDAD9D1EFC125729D0055DDAA-deAguiarCVPR2007 %D 2007 %B 2007 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2007-06-17 - 2007-06-22 %C Minneapolis, MN, USA %X We present a novel algorithm to jointly capture the motion and the dynamic <br>shape of humans from <br>multiple video streams without using optical markers. Instead of relying on <br>kinematic skeletons, <br>as traditional motion capture methods, our approach uses a deformable <br>high-quality mesh of a human <br>as scene representation. It jointly uses an image-based <br>\mbox{3D} correspondence estimation algorithm and a fast <br>Laplacian mesh deformation scheme to capture both <br>motion and surface deformation <br>of the actor from the input video footage. As opposed to many related methods, <br>our algorithm can track people wearing wide apparel, it can straightforwardly <br>be applied to <br>any type of subject, e.g. animals, and it preserves the connectivity <br>of the mesh over time. We demonstrate the performance of our approach using <br>synthetic and <br>captured real-world video sequences and validate its accuracy by comparison to <br>the ground truth. %B 2007 IEEE Conference on Computer Vision and Pattern Recognition %P 2502 - 2509 %I IEEE %@ 1-424-41179-3
De Aguiar, E., Theobalt, C., Stoll, C., and Seidel, H.-P. 2007e. Marker-less 3D Feature Tracking for Mesh-based Motion Capture. Human Motion - Understanding, Modeling, Capture and Animation, Springer.
Abstract
We present a novel algorithm that robustly tracks 3D trajectories of features on a moving human who has been recorded with multiple video cameras. Our method does so without special markers in the scene and can be used to track subjects wearing everyday apparel. By using the paths of the 3D points as constraints in a fast mesh deformation approach, we can directly animate a static human body scan such that it performs the same motion as the captured subject. Our method can therefore be used to directly animate high quality geometry models from unaltered video data which opens the door to new applications in motion capture, 3D Video and computer animation. Since our method does not require a kinematic skeleton and only employs a handful of feature trajectories to generate lifelike animations with realistic surface deformations, it can also be used to track subjects wearing wide apparel, and even animals. We demonstrate the performance of our approach using several captured real-world sequences, and also validate its accuracy.
Export
BibTeX
@inproceedings{deAguiar2006_HM07,
  TITLE     = {Marker-less {3D} Feature Tracking for Mesh-based Motion Capture},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Stoll, Carsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-75702-3},
  DOI       = {10.1007/978-3-540-75703-0_1},
  LOCALID   = {Local-ID: C12573CC004A8E26-961A40E744D69B42C12573B6000C8118-deAguiar2006_HM07},
  PUBLISHER = {Springer},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present a novel algorithm that robustly tracks 3D trajectories of features on a moving human who has been recorded with multiple video cameras. Our method does so without special markers in the scene and can be used to track subjects wearing everyday apparel. By using the paths of the 3D points as constraints in a fast mesh deformation approach, we can directly animate a static human body scan such that it performs the same motion as the captured subject. Our method can therefore be used to directly animate high quality geometry models from unaltered video data which opens the door to new applications in motion capture, 3D Video and computer animation. Since our method does not require a kinematic skeleton and only employs a handful of feature trajectories to generate lifelike animations with realistic surface deformations, it can also be used to track subjects wearing wide apparel, and even animals. We demonstrate the performance of our approach using several captured real-world sequences, and also validate its accuracy.},
  BOOKTITLE = {Human Motion -- Understanding, Modeling, Capture and Animation},
  EDITOR    = {Elgammal, Ahmed and Rosenhahn, Bodo and Klette, Reinhard},
  PAGES     = {1--15},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4814},
  ADDRESS   = {Rio de Janeiro, Brazil},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Stoll, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Marker-less 3D Feature Tracking for Mesh-based Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1FC3-0 %F EDOC: 356521 %R 10.1007/978-3-540-75703-0_1 %F OTHER: Local-ID: C12573CC004A8E26-961A40E744D69B42C12573B6000C8118-deAguiar2006_HM07 %D 2007 %B Second Workshop on Human Motion %Z date of event: 2007-10-20 - 2007-10-20 %C Rio de Janeiro, Brazil %X We present a novel algorithm that robustly tracks 3D trajectories<br>of features on a moving human who has been recorded with multiple video <br>cameras. Our method does so without special markers in the scene and can be <br>used to track subjects wearing everyday apparel. By using the paths of the 3D <br>points as constraints in a fast mesh deformation approach, we can directly <br>animate a static human body scan such that it performs the same motion as the <br>captured ubject. Our method can therefore be used to directly animate high <br>quality geometry models from unaltered video data which opens the door to new <br>applications in motion capture, 3D Video and computer animation. Since our <br>method does not require a kinematic skeleton and only employs a handful of <br>feature trajectories to generate ifelike animations with realistic surface <br>deformations, it can lso be used to track subjects wearing wide apparel, and <br>even nimals. We demonstrate the performance of our approach using several <br>captured real-world sequences, and also validate its accuracy. 
%B Human Motion - Understanding, Modeling, Capture and Animation %E Elgammal, Ahmed; Rosenhahn, Bodo; Klette, Reinhard %P 1 - 15 %I Springer %@ 978-3-540-75702-3 %B Lecture Notes in Computer Science %N 4814 %U https://rdcu.be/dIMOU
Chen, T., Lensch, H.P.A., Fuchs, C., and Seidel, H.-P. 2007. Polarization and Phase-shifting for 3D Scanning of Translucent Objects. 2007 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2007), IEEE.
Abstract
Translucent objects pose a difficult problem for traditional structured light 3D scanning techniques. Subsurface scattering corrupts the range estimation in two ways: by drastically reducing the signal-to-noise ratio and by shifting the intensity peak beneath the surface to a point which does not coincide with the point of incidence. In this paper we analyze and compare two descattering methods in order to obtain reliable 3D coordinates for translucent objects. By using polarization-difference imaging, subsurface scattering can be filtered out because multiple scattering randomizes the polarization direction of light while the surface reflectance partially keeps the polarization direction of the illumination. The descattered reflectance can be used for reliable 3D reconstruction using traditional optical 3D scanning techniques, such as structured light. Phase-shifting is another effective descattering technique if the frequency of the projected pattern is sufficiently high. We demonstrate the performance of these two techniques and the combination of them on scanning real-world translucent objects.
Export
BibTeX
@inproceedings{Chen-et-al_CVPG07,
  TITLE     = {Polarization and Phase-shifting for {3D} Scanning of Translucent Objects},
  AUTHOR    = {Chen, Tongbo and Lensch, Hendrik P. A. and Fuchs, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-4244-1180-7},
  DOI       = {10.1109/CVPR.2007.383209},
  LOCALID   = {Local-ID: C12573CC004A8E26-99E94021099C65A5C125738300505E2A-Chen:2006:PAP},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {Translucent objects pose a difficult problem for traditional structured light 3D scanning techniques. Subsurface scattering corrupts the range estimation in two ways: by drastically reducing the signal-to-noise ratio and by shifting the intensity peak beneath the surface to a point which does not coincide with the point of incidence. In this paper we analyze and compare two descattering methods in order to obtain reliable 3D coordinates for translucent objects. By using polarization-difference imaging, subsurface scattering can be filtered out because multiple scattering randomizes the polarization direction of light while the surface reflectance partially keeps the polarization direction of the illumination. The descattered reflectance can be used for reliable 3D reconstruction using traditional optical 3D scanning techniques, such as structured light. Phase-shifting is another effective descattering technique if the frequency of the projected pattern is sufficiently high. We demonstrate the performance of these two techniques and the combination of them on scanning real-world translucent objects.},
  BOOKTITLE = {2007 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2007)},
  PAGES     = {1829--1836},
  ADDRESS   = {Minneapolis, MN, USA},
}
Endnote
%0 Conference Proceedings %A Chen, Tongbo %A Lensch, Hendrik P. A. %A Fuchs, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Polarization and Phase-shifting for 3D Scanning of Translucent Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2053-3 %F EDOC: 356575 %R 10.1109/CVPR.2007.383209 %F OTHER: Local-ID: C12573CC004A8E26-99E94021099C65A5C125738300505E2A-Chen:2006:PAP %D 2007 %B 2007 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2007-06-17 - 2007-06-22 %C Minneapolis, MN, USA %X Translucent objects pose a difficult problem for traditional structured light <br>3D scanning techniques. Subsurface scattering corrupts the range estimation in <br>two ways: by drastically reducing the signal-to-noise ratio and by shifting the <br>intensity peak beneath the surface to a point which does not coincide with the <br>point of incidence. In this paper we analyze and compare two descattering <br>methods in order to obtain reliable 3D coordinates for translucent objects. By <br>using polarization-difference imaging, subsurface scattering can be filtered <br>out because multiple scattering randomizes the polarization direction of light <br>while the surface reflectance partially keeps the polarization direction of the <br>illumination. The descattered reflectance can be used for reliable 3D <br>reconstruction using traditional optical 3D scanning techniques, such as <br>structured light. Phase-shifting is another effective descattering technique if <br>the frequency of the projected pattern is sufficiently high. We demonstrate the <br>performance of these two techniques and the combination of them on scanning <br>real-world translucent objects. 
%B 2007 IEEE Conference on Computer Vision and Pattern Recognition %P 1829 - 1836 %I IEEE %@ 1-4244-1180-7
Brox, T., Rosenhahn, B., Cremers, D., and Seidel, H.-P. 2007. Nonparametric Density Estimation with Adaptive Anisotropic Kernels for Human Motion Tracking. Human Motion - Understanding, Modeling, Capture and Animation, Springer.
Export
BibTeX
@inproceedings{Brox-et-al_HM07,
  TITLE     = {Nonparametric Density Estimation with Adaptive Anisotropic Kernels for Human Motion Tracking},
  AUTHOR    = {Brox, Thomas and Rosenhahn, Bodo and Cremers, Daniel and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-75702-3},
  DOI       = {10.1007/978-3-540-75703-0_11},
  LOCALID   = {Local-ID: C12573CC004A8E26-CADCD4114A0636E1C12573C50044F480-BroxICCVHMWS2007},
  PUBLISHER = {Springer},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {Human Motion -- Understanding, Modeling, Capture and Animation},
  EDITOR    = {Elgammal, Ahmed and Rosenhahn, Bodo and Klette, Reinhard},
  PAGES     = {152--165},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4814},
  ADDRESS   = {Rio de Janeiro, Brazil},
}
Endnote
%0 Conference Proceedings %A Brox, Thomas %A Rosenhahn, Bodo %A Cremers, Daniel %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Nonparametric Density Estimation with Adaptive Anisotropic Kernels for Human Motion Tracking : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2005-6 %F EDOC: 356504 %R 10.1007/978-3-540-75703-0_11 %F OTHER: Local-ID: C12573CC004A8E26-CADCD4114A0636E1C12573C50044F480-BroxICCVHMWS2007 %D 2007 %B Second Workshop on Human Motion %Z date of event: 2007-10-20 - 2007-10-20 %C Rio de Janeiro, Brazil %B Human Motion - Understanding, Modeling, Capture and Animation %E Elgammal, Ahmed; Rosenhahn, Bodo; Klette, Reinhard %P 152 - 165 %I Springer %@ 3-540-75702-3 %B Lecture Notes in Computer Science %N 4814 %U https://rdcu.be/dIMKl
Blanz, V., Scherbaum, K., and Seidel, H.-P. 2007. Fitting a Morphable Model to 3D Scans of Faces. Proceedings of the IEEE 11th International Conference on Computer Vision (ICCV 2007), IEEE.
Abstract
This paper presents a top-down approach to 3D data analysis by fitting a Morphable Model to scans of faces. In a unified framework, the algorithm optimizes shape, texture, pose and illumination simultaneously. The algorithm can be used as a core component in face recognition from scans. In an analysis-by-synthesis approach, raw scans are transformed into a PCA-based representation that is robust with respect to changes in pose and illumination. Illumination conditions are estimated in an explicit simulation that involves specular and diffuse components. The algorithm inverts the effect of shading in order to obtain the diffuse reflectance in each point of the facial surface. Our results include illumination correction, surface completion and face recognition on the FRGC database of scans.
Export
BibTeX
@inproceedings{BlaScheSei07,
  TITLE     = {Fitting a Morphable Model to {3D} Scans of Faces},
  AUTHOR    = {Blanz, Volker and Scherbaum, Kristina and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-1631-8},
  DOI       = {10.1109/ICCV.2007.4409029},
  LOCALID   = {Local-ID: C12573CC004A8E26-D7EEDAC500D62994C12573AE005CD90B-BlaScheSei07},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {This paper presents a top-down approach to 3D data analysis by fitting a Morphable Model to scans of faces. In a unified framework, the algorithm optimizes shape, texture, pose and illumination simultaneously. The algorithm can be used as a core component in face recognition from scans. In an analysis-by-synthesis approach, raw scans are transformed into a PCA-based representation that is robust with respect to changes in pose and illumination. Illumination conditions are estimated in an explicit simulation that involves specular and diffuse components. The algorithm inverts the effect of shading in order to obtain the diffuse reflectance in each point of the facial surface. Our results include illumination correction, surface completion and face recognition on the FRGC database of scans.},
  BOOKTITLE = {Proceedings of the IEEE 11th International Conference on Computer Vision (ICCV 2007)},
  PAGES     = {1--8},
  EID       = {4409029},
  ADDRESS   = {Rio de Janeiro, Brazil},
}
Endnote
%0 Conference Proceedings %A Blanz, Volker %A Scherbaum, Kristina %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fitting a Morphable Model to 3D Scans of Faces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F45-E %F EDOC: 356527 %R 10.1109/ICCV.2007.4409029 %F OTHER: Local-ID: C12573CC004A8E26-D7EEDAC500D62994C12573AE005CD90B-BlaScheSei07 %D 2007 %B IEEE 11th International Conference on Computer Vision %Z date of event: 2007-10-14 - 2007-10-20 %C Rio de Janeiro, Brasil %X This paper presents a top-down approach to 3D data analysis by fitting a <br>Morphable Model to scans of faces. In a unified framework, the algorithm <br>optimizes shape, texture, pose and illumination simultaneously. The algorithm <br>can be used as a core component in face recognition from scans. In an <br>analysis-by-synthesis approach, raw scans are transformed into a PCA-based <br>representation that is robust with respect to changes in pose and illumination. <br>Illumination conditions are estimated in an explicit simulation that involves <br>specular and diffuse components. The algorithm inverts the effect of shading in <br>order to obtain the diffuse reflectance in each point of the facial surface. <br>Our results include illumination correction, surface completion and face <br>recognition on the FRGC database of scans. %B Proceedings of the IEEE 11th International Conference on Computer Vision %P 1 - 8 %Z sequence number: 4409029 %I IEEE %@ 978-1-4244-1631-8
Bargmann, R., Blanz, V., and Seidel, H.-P. 2007. A nonlinear viseme model for triphone-based speech synthesis. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
This paper presents a representation of visemes that defines a measure of similarity between different visemes, and a system of viseme categories. The representation is derived from a statistical data analysis of feature points on 3D scans, using Locally Linear Embedding (LLE). The similarity measure determines which available viseme and triphones to use to synthesize 3D face animation for a novel audio file. From a corpus of dynamic recorded 3D mouth articulation data, our system is able to find the best suited sequence of triphones over which to interpolate while reusing the coarticulation information to obtain correct mouth movements over time. Due to the similarity measure, the system can deal with relatively small triphone databases and find the most appropriate candidates. With the selected sequence of database triphones, we can finally morph along the successive triphones to produce the final articulation animation. In an entirely data-driven approach, our automated procedure for defining viseme categories reproduces the groups of related visemes that are defined in the phonetics literature.
Export
BibTeX
@techreport{BargmannBlanzSeidel2007,
  TITLE       = {A nonlinear viseme model for triphone-based speech synthesis},
  AUTHOR      = {Bargmann, Robert and Blanz, Volker and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-003},
  NUMBER      = {MPI-I-2007-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2007},
  DATE        = {2007},
  ABSTRACT    = {This paper presents a representation of visemes that defines a measure of similarity between different visemes, and a system of viseme categories. The representation is derived from a statistical data analysis of feature points on 3D scans, using Locally Linear Embedding (LLE). The similarity measure determines which available viseme and triphones to use to synthesize 3D face animation for a novel audio file. From a corpus of dynamic recorded 3D mouth articulation data, our system is able to find the best suited sequence of triphones over which to interpolate while reusing the coarticulation information to obtain correct mouth movements over time. Due to the similarity measure, the system can deal with relatively small triphone databases and find the most appropriate candidates. With the selected sequence of database triphones, we can finally morph along the successive triphones to produce the final articulation animation. In an entirely data-driven approach, our automated procedure for defining viseme categories reproduces the groups of related visemes that are defined in the phonetics literature.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Bargmann, Robert %A Blanz, Volker %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A nonlinear viseme model for triphone-based speech synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-66DC-7 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2007-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2007 %P 28 p. %X This paper presents a representation of visemes that defines a measure of similarity between different visemes, and a system of viseme categories. The representation is derived from a statistical data analysis of feature points on 3D scans, using Locally Linear Embedding (LLE). The similarity measure determines which available viseme and triphones to use to synthesize 3D face animation for a novel audio file. From a corpus of dynamic recorded 3D mouth articulation data, our system is able to find the best suited sequence of triphones over which to interpolate while reusing the coarticulation information to obtain correct mouth movements over time. Due to the similarity measure, the system can deal with relatively small triphone databases and find the most appropriate candidates. With the selected sequence of database triphones, we can finally morph along the successive triphones to produce the final articulation animation. In an entirely data-driven approach, our automated procedure for defining viseme categories reproduces the groups of related visemes that are defined in the phonetics literature. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Atcheson, B., Ihrke, I., Bradley, D., Heidrich, W., Magnor, M.A., and Seidel, H.-P. 2007. Imaging and 3D Tomographic Reconstruction of Time-varying, Inhomogeneous Refractive Index Fields. SIGGRAPH ’07: ACM SIGGRAPH 2007 sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/AtchesonIBHMS07,
  TITLE     = {Imaging and {3D} Tomographic Reconstruction of Time-varying, Inhomogeneous Refractive Index Fields},
  AUTHOR    = {Atcheson, Bradley and Ihrke, Ivo and Bradley, Derek and Heidrich, Wolfgang and Magnor, Marcus A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-4726-6},
  DOI       = {10.1145/1278780.1278819},
  PUBLISHER = {ACM},
  YEAR      = {2007},
  DATE      = {2007},
  BOOKTITLE = {SIGGRAPH '07: ACM SIGGRAPH 2007 sketches},
  EDITOR    = {Alexa, Marc and Finkelstein, Adam},
  PAGES     = {32--32},
  ADDRESS   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Atcheson, Bradley %A Ihrke, Ivo %A Bradley, Derek %A Heidrich, Wolfgang %A Magnor, Marcus A. %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Imaging and 3D Tomographic Reconstruction of Time-varying, Inhomogeneous Refractive Index Fields : %G eng %U http://hdl.handle.net/21.11116/0000-000F-5040-E %R 10.1145/1278780.1278819 %D 2007 %B International Conference on Computer Graphics and Interactive Techniques 2007 %Z date of event: 2007-08-05 - 2007-08-09 %C San Diego, CA, USA %B SIGGRAPH '07: ACM SIGGRAPH 2007 sketches %E Alexa, Marc; Finkelstein, Adam %P 32 - 32 %I ACM %@ 978-1-4503-4726-6
Annen, T., Mertens, T., Bekaert, P., Seidel, H.-P., and Kautz, J. 2007. Convolution Shadow Maps. EGSR07: 18th Eurographics Symposium on Rendering, Eurographics.
Abstract
We present \emph{Convolution Shadow Maps}, a novel shadow representation that <br>affords efficient arbitrary linear filtering of shadows. Traditional shadow <br>mapping is inherently non-linear w.r.t.\ the stored depth values due to the <br>binary shadow test. We linearize the problem by approximating shadow maps as a <br>weighted summation of basis terms. We demonstrate the usefulness of<br>this representation and show that hardware-accelerated anti-aliasing <br>techniques, such as tri-linear filtering, can be applied naturally to <br>Convolution Shadow Maps. <br>This approach can be implemented very efficiently in current generation <br>graphics hardware<br>yielding real-time frame rates.
Export
BibTeX
@inproceedings{Annen-et-al_EGSR07,
  TITLE     = {Convolution Shadow Maps},
  AUTHOR    = {Annen, Thomas and Mertens, Tom and Bekaert, Philippe and Seidel, Hans-Peter and Kautz, Jan},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905673-52-4},
  DOI       = {10.2312/EGWR/EGSR07/051-060},
  LOCALID   = {Local-ID: C12573CC004A8E26-534CA48BCE4C3DB8C12572E300558631-CSM:EGSR:2007},
  PUBLISHER = {Eurographics},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {We present \emph{Convolution Shadow Maps}, a novel shadow representation that <br>affords efficient arbitrary linear filtering of shadows. Traditional shadow <br>mapping is inherently non-linear w.r.t.\ the stored depth values due to the <br>binary shadow test. We linearize the problem by approximating shadow maps as a <br>weighted summation of basis terms. We demonstrate the usefulness of<br>this representation and show that hardware-accelerated anti-aliasing <br>techniques, such as tri-linear filtering, can be applied naturally to <br>Convolution Shadow Maps. <br>This approach can be implemented very efficiently in current generation <br>graphics hardware<br>yielding real-time frame rates.},
  BOOKTITLE = {EGSR07: 18th Eurographics Symposium on Rendering},
  EDITOR    = {Fellner, Dieter and Spencer, Stephen},
  PAGES     = {51--60},
  ADDRESS   = {Grenoble, France},
}
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Mertens, Tom %A Bekaert, Philippe %A Seidel, Hans-Peter %A Kautz, Jan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Convolution Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1EBE-8 %F EDOC: 356558 %R 10.2312/EGWR/EGSR07/051-060 %F OTHER: Local-ID: C12573CC004A8E26-534CA48BCE4C3DB8C12572E300558631-CSM:EGSR:2007 %D 2007 %B 18th Eurographics Symposium on Rendering %Z date of event: 2007-06-25 - 2007-06-27 %C Grenoble, France %X We present \emph{Convolution Shadow Maps}, a novel shadow representation that <br>affords efficient arbitrary linear filtering of shadows. Traditional shadow <br>mapping is inherently non-linear w.r.t.\ the stored depth values due to the <br>binary shadow test. We linearize the problem by approximating shadow maps as a <br>weighted summation of basis terms. We demonstrate the usefulness of<br>this representation and show that hardware-accelerated anti-aliasing <br>techniques, such as tri-linear filtering, can be applied naturally to <br>Convolution Shadow Maps. <br>This approach can be implemented very efficiently in current generation <br>graphics hardware<br>yielding real-time frame rates. %B EGSR07: 18th Eurographics Symposium on Rendering %E Fellner, Dieter; Spencer, Stephen %P 51 - 60 %I Eurographics %@ 978-3-905673-52-4
Ahmed, N., Theobalt, C., Magnor, M., and Seidel, H.-P. 2007a. Spatio-Temporal Registration Techniques for Relightable 3D Video. 2007 IEEE International Conference on Image Processing (ICIP 2007), IEEE.
Abstract
By jointly applying a model-based marker-less motion capture approach and <br>multi-view texture generation 3D Videos of human actors can be reconstructed <br>from multi-view video streams. If the input data were recorded under calibrated <br>lighting, the texture information can also be used to measure time-varying <br>surface reflectance. This way, 3D videos can be realistically displayed under <br>novel lighting conditions. Reflectance estimation is only feasible if the <br>multi-view texture-to-surface registration is consistent over time. In this <br>paper, we propose two image-based warping methods that compensate registration <br>errors due to inaccurate model geometry and shifting of apparel over the body.
Export
BibTeX
@inproceedings{Ahmed-et-al_ICIP07,
  TITLE     = {Spatio-Temporal Registration Techniques for Relightable {3D} Video},
  AUTHOR    = {Ahmed, Naveed and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-1437-6},
  DOI       = {10.1109/ICIP.2007.4379202},
  LOCALID   = {Local-ID: C12573CC004A8E26-E0089B21706B6F90C12573B1004C02B9-AhmedICIP07},
  PUBLISHER = {IEEE},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {By jointly applying a model-based marker-less motion capture approach and <br>multi-view texture generation 3D Videos of human actors can be reconstructed <br>from multi-view video streams. If the input data were recorded under calibrated <br>lighting, the texture information can also be used to measure time-varying <br>surface reflectance. This way, 3D videos can be realistically displayed under <br>novel lighting conditions. Reflectance estimation is only feasible if the <br>multi-view texture-to-surface registration is consistent over time. In this <br>paper, we propose two image-based warping methods that compensate registration <br>errors due to inaccurate model geometry and shifting of apparel over the body.},
  BOOKTITLE = {2007 IEEE International Conference on Image Processing (ICIP 2007)},
  PAGES     = {501--504},
  ADDRESS   = {San Antonio, TX, USA},
}
Endnote
%0 Conference Proceedings %A Ahmed, Naveed %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-Temporal Registration Techniques for Relightable 3D Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20C1-D %F EDOC: 356554 %R 10.1109/ICIP.2007.4379202 %F OTHER: Local-ID: C12573CC004A8E26-E0089B21706B6F90C12573B1004C02B9-AhmedICIP07 %D 2007 %B 2007 IEEE International Conference on Image Processing %Z date of event: 2007-09-16 - 2007-09-19 %C San Antonio, TX, USA %X By jointly applying a model-based marker-less motion capture approach and <br>multi-view texture generation 3D Videos of human actors can be reconstructed <br>from multi-view video streams. If the input data were recorded under calibrated <br>lighting, the texture information can also be used to measure time-varying <br>surface reflectance. This way, 3D videos can be realistically displayed under <br>novel lighting conditions. Reflectance estimation is only feasible if the <br>multi-view texture-to-surface registration is consistent over time. In this <br>paper, we propose two image-based warping methods that compensate registration <br>errors due to inaccurate model geometry and shifting of apparel over the body. %B 2007 IEEE International Conference on Image Processing %P 501 - 504 %I IEEE %@ 978-1-4244-1437-6
Ahmed, N., Theobalt, C., and Seidel, H.-P. 2007b. Spatio-temporal Reflectance Sharing for Relightable 3D Video. Computer Vision/Computer Graphics Collaboration Techniques (MIRAGE 2007), Springer.
Abstract
In our previous work, we have shown that by means of a model based approach, <br>relightable free viewpoint videos of human actors can be reconstructed from <br>only a handful of multi view video streams recorded under calibrated <br>illumination. To achieve this purpose, we employ a marker free motion capture <br>approach to measure dynamic human scene geometry. Reflectance samples for each <br>surface point are captured by exploiting the fact that, due to the person's <br>motion, each surface location is, over time, exposed to the acquisition sensors <br>under varying orientations. Although this is the first setup of its kind to <br>measure surface reflectance from footage of arbitrary human performances, our <br>approach may lead to a biased sampling of surface reflectance since each <br>surface point is only seen under a limited number of half vector directions. We <br>thus propose in this paper a novel algorithm that reduces the bias in BRDF <br>estimates of a single surface point by cleverly taking into account reflectance <br>samples from other surface locations made of similar material. We demonstrate <br>the improvements achieved with this spatio temporal reflectance sharing <br>approach both visually and quantitatively.
Export
BibTeX
@inproceedings{Ahmed-et-al_MIRAGE07,
  TITLE     = {Spatio-temporal Reflectance Sharing for Relightable {3D} Video},
  AUTHOR    = {Ahmed, Naveed and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-71456-9},
  DOI       = {10.1007/978-3-540-71457-6_5},
  LOCALID   = {Local-ID: C12573CC004A8E26-1FCEB1FF874D0295C1257296004D07CC-nahmedMirage2007},
  PUBLISHER = {Springer},
  YEAR      = {2007},
  DATE      = {2007},
  ABSTRACT  = {In our previous work, we have shown that by means of a model based approach, <br>relightable free viewpoint videos of human actors can be reconstructed from <br>only a handful of multi view video streams recorded under calibrated <br>illumination. To achieve this purpose, we employ a marker free motion capture <br>approach to measure dynamic human scene geometry. Reflectance samples for each <br>surface point are captured by exploiting the fact that, due to the person's <br>motion, each surface location is, over time, exposed to the acquisition sensors <br>under varying orientations. Although this is the first setup of its kind to <br>measure surface reflectance from footage of arbitrary human performances, our <br>approach may lead to a biased sampling of surface reflectance since each <br>surface point is only seen under a limited number of half vector directions. We <br>thus propose in this paper a novel algorithm that reduces the bias in BRDF <br>estimates of a single surface point by cleverly taking into account reflectance <br>samples from other surface locations made of similar material. We demonstrate <br>the improvements achieved with this spatio temporal reflectance sharing <br>approach both visually and quantitatively.},
  BOOKTITLE = {Computer Vision/Computer Graphics Collaboration Techniques (MIRAGE 2007)},
  EDITOR    = {Gagalowicz, Andr{\'e} and Philips, Wilfried},
  PAGES     = {47--58},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4418},
  ADDRESS   = {Rocquencourt, France},
}
Endnote
%0 Conference Proceedings %A Ahmed, Naveed %A Theobalt, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-temporal Reflectance Sharing for Relightable 3D Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20BE-6 %F EDOC: 356632 %R 10.1007/978-3-540-71457-6_5 %F OTHER: Local-ID: C12573CC004A8E26-1FCEB1FF874D0295C1257296004D07CC-nahmedMirage2007 %D 2007 %B Third International Conference on Computer Vision/Computer Graphics %Z date of event: 2007-03-28 - 2007-03-30 %C Rocquencourt, France %X In our previous work, we have shown that by means of a model based approach, <br>relightable free viewpoint videos of human actors can be reconstructed from <br>only a handful of multi view video streams recorded under calibrated <br>illumination. To achieve this purpose, we employ a marker free motion capture <br>approach to measure dynamic human scene geometry. Reflectance samples for each <br>surface point are captured by exploiting the fact that, due to the person's <br>motion, each surface location is, over time, exposed to the acquisition sensors <br>under varying orientations. Although this is the first setup of its kind to <br>measure surface reflectance from footage of arbitrary human performances, our <br>approach may lead to a biased sampling of surface reflectance since each <br>surface point is only seen under a limited number of half vector directions. We <br>thus propose in this paper a novel algorithm that reduces the bias in BRDF <br>estimates of a single surface point by cleverly taking into account reflectance <br>samples from other surface locations made of similar material. We demonstrate <br>the improvements achieved with this spatio temporal reflectance sharing <br>approach both visually and quantitatively. 
%B Computer Vision/Computer Graphics Collaboration Techniques %E Gagalowicz, Andr&#233;; Philips, Wilfried %P 47 - 58 %I Springer %@ 978-3-540-71456-9 %B Lecture Notes in Computer Science %N 4418 %U https://rdcu.be/dIMhW
2006
Ziegler, G., Theobalt, C., and Seidel, H.-P. 2006a. On-the-fly Point Clouds through Histogram Pyramids. 11th International Fall Workshop on Vision, Modeling and Visualization 2006 (VMV2006), Aka.
Abstract
Image Pyramids, as created during a reduction process of 2D image maps, are frequently used in porting non-local algorithms to graphics hardware. A Histogram pyramid (short: HistoPyramid), a special version of image pyramid, collects the number of active entries in a 2D image. We show how a HistoPyramid can be utilized as an implicit indexing data structure, allowing us to convert a sparse 3D volume into a point cloud entirely on the graphics hardware. In the generalized form, the algorithm reduces a highly sparse matrix with N elements to a list of its M active entries in O(N) + M (log N) steps, despite the restricted graphics hardware architecture. Our method can be used to deliver new and unusual visual effects, such as particle explosions of arbitrary geometry models. Beyond this, the algorithm is able to accelerate feature detection, pixel classification and binning, and enable high-speed sparse matrix compression.
Export
BibTeX
@inproceedings{Ziegler2005,
  TITLE     = {On-the-fly Point Clouds through Histogram Pyramids},
  AUTHOR    = {Ziegler, Gernot and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58603-688-1},
  LOCALID   = {Local-ID: C125675300671F7B-B38C33C22FBBC8DAC125720A00430B32-Ziegler2005},
  PUBLISHER = {Aka},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Image Pyramids, as created during a reduction process of 2D image maps, are frequently used in porting non-local algorithms to graphics hardware. A Histogram pyramid (short: HistoPyramid), a special version of image pyramid, collects the number of active entries in a 2D image. We show how a HistoPyramid can be utilized as an implicit indexing data structure, allowing us to convert a sparse 3D volume into a point cloud entirely on the graphics hardware. In the generalized form, the algorithm reduces a highly sparse matrix with N elements to a list of its M active entries in O(N) + M (log N) steps, despite the restricted graphics hardware architecture. Our method can be used to deliver new and unusual visual effects, such as particle explosions of arbitrary geometry models. Beyond this, the algorithm is able to accelerate feature detection, pixel classification and binning, and enable high-speed sparse matrix compression.},
  BOOKTITLE = {11th International Fall Workshop on Vision, Modeling and Visualization 2006 (VMV2006)},
  EDITOR    = {Kobbelt, Leif and Kuhlen, Torsten and Aach, Til and Westermann, R{\"u}diger},
  PAGES     = {137--144},
  ADDRESS   = {Aachen, Germany},
}
Endnote
%0 Conference Proceedings %A Ziegler, Gernot %A Theobalt, Christian %A Seidel, Hans-Peter %E Kobbelt, Leif %E Kuhlen, Torsten %E Aach, Til %E Westermann, R&#252;diger %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On-the-fly Point Clouds through Histogram Pyramids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2399-D %F EDOC: 314595 %F OTHER: Local-ID: C125675300671F7B-B38C33C22FBBC8DAC125720A00430B32-Ziegler2005 %I Aka %D 2006 %B Untitled Event %Z date of event: 2006-11-22 - %C Aachen, Germany %X Image Pyramids, as created during a reduction process of 2D image maps, are frequently used in porting non-local algorithms to graphics hardware. A Histogram pyramid (short: HistoPyramid), a special version of image pyramid, collects the number of active entries in a 2D image. We show how a HistoPyramid can be utilized as an implicit indexing data structure, allowing us to convert a sparse 3D volume into a point cloud entirely on the graphics hardware. In the generalized form, the algorithm reduces a highly sparse matrix with N elements to a list of its M active entries in O(N) + M (log N) steps, despite the restricted graphics hardware architecture. Our method can be used to deliver new and unusual visual effects, such as particle explosions of arbitrary geometry models. Beyond this, the algorithm is able to accelerate feature detection, pixel classification and binning, and enable high-speed sparse matrix compression. %B 11th International Fall Workshop on Vision, Modeling and Visualization 2006 (VMV2006) %P 137 - 144 %I Aka %@ 978-1-58603-688-1
Ziegler, G., Tevs, A., Theobalt, C., and Seidel, H.-P. 2006b. GPU point list generation through histogram pyramids. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Image Pyramids are frequently used in porting non-local algorithms to graphics hardware. A Histogram pyramid (short: HistoPyramid), a special version of image pyramid, sums up the number of active entries in a 2D image hierarchically. We show how a HistoPyramid can be utilized as an implicit indexing data structure, allowing us to convert a sparse matrix into a coordinate list of active cell entries (a point list) on graphics hardware . The algorithm reduces a highly sparse matrix with N elements to a list of its M active entries in O(N) + M (log N) steps, despite the restricted graphics hardware architecture. Applications are numerous, including feature detection, pixel classification and binning, conversion of 3D volumes to particle clouds and sparse matrix compression.
Export
BibTeX
@techreport{OhtakeBelyaevSeidel2004,
  TITLE       = {{GPU} point list generation through histogram pyramids},
  AUTHOR      = {Ziegler, Gernot and Tevs, Art and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-002},
  NUMBER      = {MPI-I-2006-4-002},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2006},
  DATE        = {2006},
  ABSTRACT    = {Image Pyramids are frequently used in porting non-local algorithms to graphics hardware. A Histogram pyramid (short: HistoPyramid), a special version of image pyramid, sums up the number of active entries in a 2D image hierarchically. We show how a HistoPyramid can be utilized as an implicit indexing data structure, allowing us to convert a sparse matrix into a coordinate list of active cell entries (a point list) on graphics hardware . The algorithm reduces a highly sparse matrix with N elements to a list of its M active entries in O(N) + M (log N) steps, despite the restricted graphics hardware architecture. Applications are numerous, including feature detection, pixel classification and binning, conversion of 3D volumes to particle clouds and sparse matrix compression.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Ziegler, Gernot %A Tevs, Art %A Theobalt, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T GPU point list generation through histogram pyramids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-680E-9 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-002 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 13 p. %X Image Pyramids are frequently used in porting non-local algorithms to graphics hardware. A Histogram pyramid (short: HistoPyramid), a special version of image pyramid, sums up the number of active entries in a 2D image hierarchically. We show how a HistoPyramid can be utilized as an implicit indexing data structure, allowing us to convert a sparse matrix into a coordinate list of active cell entries (a point list) on graphics hardware . The algorithm reduces a highly sparse matrix with N elements to a list of its M active entries in O(N) + M (log N) steps, despite the restricted graphics hardware architecture. Applications are numerous, including feature detection, pixel classification and binning, conversion of 3D volumes to particle clouds and sparse matrix compression. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Zayer, R., Rössl, C., and Seidel, H.-P. 2006. Curvilinear Spherical Parameterization. IEEE International Conference on Shape Modeling and Applications 2006 (SMI 2006), IEEE.
Abstract
We present an efficient approach for solving the spherical parameterization <br>problem. The essence of the approach is to look for a solution in the <br>curvilinear coordinate system, without requiring the additional spherical <br>constraints usually needed in cartesian formulations. This setup allows us to <br>take full advantage of some existing techniques originally developed for planar <br>parameterization. Our results substantiate the efficiency of the method and <br>confirm its robustness. Meshes of non-trivial geometry with tens of thousands <br>of triangles are processed in a few seconds, always yielding bijective maps. <br>This computational achievement bridges a so far wide gap in performance between <br>spherical and planar parameterization.
Export
BibTeX
@inproceedings{Zayer-et-al_SMI06,
  TITLE     = {Curvilinear Spherical Parameterization},
  AUTHOR    = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2591-1},
  DOI       = {10.1109/SMI.2006.9},
  LOCALID   = {Local-ID: C125675300671F7B-13B358CD66F99E12C1257148002EF1A2-zayer:csp:2006},
  PUBLISHER = {IEEE},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {We present an efficient approach for solving the spherical parameterization <br>problem. The essence of the approach is to look for a solution in the <br>curvilinear coordinate system, without requiring the additional spherical <br>constraints usually needed in cartesian formulations. This setup allows us to <br>take full advantage of some existing techniques originally developed for planar <br>parameterization. Our results substantiate the efficiency of the method and <br>confirm its robustness. Meshes of non-trivial geometry with tens of thousands <br>of triangles are processed in a few seconds, always yielding bijective maps. <br>This computational achievement bridges a so far wide gap in performance between <br>spherical and planar parameterization.},
  BOOKTITLE = {IEEE International Conference on Shape Modeling and Applications 2006 (SMI 2006)},
  PAGES     = {57--64},
  ADDRESS   = {Matsushima, Japan},
}
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Curvilinear Spherical Parameterization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-227C-8 %F EDOC: 314374 %F OTHER: Local-ID: C125675300671F7B-13B358CD66F99E12C1257148002EF1A2-zayer:csp:2006 %R 10.1109/SMI.2006.9 %D 2006 %B IEEE International Conference on Shape Modeling and Applications 2006 %Z date of event: 2006-06-14 - 2006-06-16 %C Matsushima, Japan %X We present an efficient approach for solving the spherical parameterization <br>problem. The essence of the approach is to look for a solution in the <br>curvilinear coordinate system, without requiring the additional spherical <br>constraints usually needed in cartesian formulations. This setup allows us to <br>take full advantage of some existing techniques originally developed for planar <br>parameterization. Our results substantiate the efficiency of the method and <br>confirm its robustness. Meshes of non-trivial geometry with tens of thousands <br>of triangles are processed in a few seconds, always yielding bijective maps. <br>This computational achievement bridges a so far wide gap in performance between <br>spherical and planar parameterization. %B IEEE International Conference on Shape Modeling and Applications 2006 %P 57 - 64 %I IEEE %@ 0-7695-2591-1
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2006. Smoothing by Example: Mesh Denoising by Averaging with Similarity-based Weights. IEEE International Conference on Shape Modeling and Applications 2006 (SMI 2006), IEEE.
Abstract
In this paper, we propose a new and powerful mesh/soup denoising <br>technique. Our approach is inspired by recent non-local image <br>denoising schemes and naturally extends bilateral mesh smoothing <br>methods. The main idea behind the approach is very simple.<br>A new position of vertex $P$ of a noisy mesh is obtained as <br>a weighted mean of mesh vertices $Q$ with nonlinear weights <br>reflecting a similarity between local neighborhoods of $P$ and $Q$.<br>We demonstrated that our technique outperforms recent <br>state-of-the-art smoothing methods.<br>We also suggest a new approach for comparing different <br>mesh/soup denoising methods.
Export
BibTeX
@inproceedings{Yoshizawa-et-al_SMI06,
  TITLE     = {Smoothing by Example: Mesh Denoising by Averaging with Similarity-based Weights},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2591-1},
  DOI       = {10.1109/SMI.2006.38},
  LOCALID   = {Local-ID: C125675300671F7B-F4156CE7A4D6BFE1C125712E002DB493-YoshizawaSMI2006},
  PUBLISHER = {IEEE},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {In this paper, we propose a new and powerful mesh/soup denoising technique. Our approach is inspired by recent non-local image denoising schemes and naturally extends bilateral mesh smoothing methods. The main idea behind the approach is very simple. A new position of vertex $P$ of a noisy mesh is obtained as a weighted mean of mesh vertices $Q$ with nonlinear weights reflecting a similarity between local neighborhoods of $P$ and $Q$. We demonstrated that our technique outperforms recent state-of-the-art smoothing methods. We also suggest a new approach for comparing different mesh/soup denoising methods.},
  BOOKTITLE = {IEEE International Conference on Shape Modeling and Applications 2006 (SMI 2006)},
  EDITOR    = {Spagnuolo, M. and Belyaev, A. and Suzuki, H.},
  PAGES     = {38--44},
  ADDRESS   = {Matsushima, Japan},
}
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Smoothing by Example: Mesh Denoising by Averaging with Similarity-based Weights : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23F4-E %F EDOC: 314655 %F OTHER: Local-ID: C125675300671F7B-F4156CE7A4D6BFE1C125712E002DB493-YoshizawaSMI2006 %R 10.1109/SMI.2006.38 %D 2006 %B IEEE International Conference on Shape Modeling and Applications 2006 %Z date of event: 2006-06-14 - 2006-06-16 %C Matsushima, JAPAN %X In this paper, we propose a new and powerful mesh/soup denoising <br>technique. Our approach is inspired by recent non-local image <br>denoising schemes and naturally extends bilateral mesh smoothing <br>methods. The main idea behind the approach is very simple.<br>A new position of vertex $P$ of a noisy mesh is obtained as <br>a weighted mean of mesh vertices $Q$ with nonlinear weights <br>reflecting a similarity between local neighborhoods of $P$ and $Q$.<br>We demonstrated that our technique outperforms recent <br>state-of-the-art smoothing methods.<br>We also suggest a new approach for comparing different <br>mesh/soup denoising methods. %B IEEE International Conference on Shape Modeling and Applications 2006 %E Spagnuolo, M.; Belyaev, A.; Suzuki, H. %P 38 - 44 %I IEEE %@ 0-7695-2591-1
Yoshida, A., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006. Analysis of Reproducing Real-world Appearance on Displays of Varying Dynamic Range. Computer Graphics Forum, Blackwell.
Abstract
We conduct a series of experiments to investigate the desired properties of a tone mapping operator (TMO) and to design such an operator based on subjective data. We propose a novel approach to the tone mapping problem, in which the tone mapping is determined by the data from subjective experiments, rather than an image processing algorithm or a visual model. To collect such data, a series of experiments are conducted in which the subjects adjust three generic TMO parameters: brightness, contrast and color saturation. In two experiments, the subjects are to find a) the most preferred image without a reference image and b) the closest image to the real-world scene which the subjects are confronted with. The purpose of these experiments is to collect data for two rendering goals of a TMO: rendering the most preferred image and preserving the fidelity with the real world scene. The data provide an assessment for the most intuitive control over the tone mapping parameters. Unlike most of the researched TMOs that focus on rendering for standard low dynamic range monitors, we consider a broad range of potential displays, each offering different dynamic range and brightness. We simulate capabilities of such displays on a high dynamic range (HDR) monitor. This lets us address the question of whether tone mapping is needed for HDR displays.
Export
BibTeX
@inproceedings{Yoshida-et-al_EG06,
  TITLE     = {Analysis of Reproducing Real-world Appearance on Displays of Varying Dynamic Range},
  AUTHOR    = {Yoshida, Akiko and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2006.00961.x},
  LOCALID   = {Local-ID: C12573CC004A8E26-36B5343ECEA5A706C125730D00546611-Yoshida_EG2006z},
  PUBLISHER = {Blackwell},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {We conduct a series of experiments to investigate the desired properties of a tone mapping operator (TMO) and to design such an operator based on subjective data. We propose a novel approach to the tone mapping problem, in which the tone mapping is determined by the data from subjective experiments, rather than an image processing algorithm or a visual model. To collect such data, a series of experiments are conducted in which the subjects adjust three generic TMO parameters: brightness, contrast and color saturation. In two experiments, the subjects are to find a) the most preferred image without a reference image and b) the closest image to the real-world scene which the subjects are confronted with. The purpose of these experiments is to collect data for two rendering goals of a TMO: rendering the most preferred image and preserving the fidelity with the real world scene. The data provide an assessment for the most intuitive control over the tone mapping parameters. Unlike most of the researched TMOs that focus on rendering for standard low dynamic range monitors, we consider a broad range of potential displays, each offering different dynamic range and brightness. We simulate capabilities of such displays on a high dynamic range (HDR) monitor. This lets us address the question of whether tone mapping is needed for HDR displays.},
  BOOKTITLE = {Eurographics 2006 Proceedings},
  EDITOR    = {Gr{\"o}ller, Eduard and Szirmay-Kalos, L{\'a}szl{\'o}},
  PAGES     = {415--426},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {25},
  ISSUE     = {3},
  ADDRESS   = {Vienna, Austria},
}
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Analysis of Reproducing Real-world Appearance on Displays of Varying Dynamic Range : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2481-A %F EDOC: 356548 %F OTHER: Local-ID: C12573CC004A8E26-36B5343ECEA5A706C125730D00546611-Yoshida_EG2006z %R 10.1111/j.1467-8659.2006.00961.x %D 2006 %B Eurographics 2006 %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X We conduct a series of experiments to investigate the desired properties of a <br>tone mapping operator (TMO) and to design such an operator based on subjective <br>data. We propose a novel approach to the tone mapping problem, in which the <br>tone mapping is determined by the data from subjective experiments, rather than <br>an image processing algorithm or a visual model. To collect such data, a series <br>of experiments are conducted in which the subjects adjust three generic TMO <br>parameters: brightness, contrast and color saturation. In two experiments, the <br>subjects are to find a) the most preferred image without a reference image and <br>b) the closest image to the real-world scene which the subjects are confronted <br>with. The purpose of these experiments is to collect data for two rendering <br>goals of a TMO: rendering the most preferred image and preserving the fidelity <br>with the real world scene. The data provide an assessment for the most <br>intuitive control over the tone mapping parameters. Unlike most of the <br>researched TMOs that focus on rendering for standard low dynamic range <br>monitors, we consider a broad range of potential displays, each offering <br>different dynamic range and brightness. 
We simulate capabilities of such <br>displays on a high dynamic range (HDR) monitor. This lets us address the <br>question of whether tone mapping is needed for HDR displays. %B Eurographics 2006 Proceedings %E Gr&#246;ller, Eduard; Szirmay-Kalos, L&#225;szl&#243; %P 415 - 426 %I Blackwell %J Computer Graphics Forum %V 25 %N 3 %I Blackwell-Wiley %@ false
Yoon, M., Lee, Y., Lee, S., Ivrissimtzis, I., and Seidel, H.-P. 2006. Ensembles for Normal and Surface Reconstructions. Geometric Modeling and Processing - GMP 2006, Springer.
Abstract
The majority of the existing techniques for surface reconstruction and the closely related problem of normal estimation are deterministic. Their main advantages are the speed and, given a reasonably good initial input, the high quality of the reconstructed surfaces. Nevertheless, their deterministic nature may hinder them from effectively handling incomplete data with noise and outliers. In our previous work [1], we applied a statistical technique, called ensembles, to the problem of surface reconstruction. We showed that an ensemble can improve the performance of a deterministic algorithm by putting it into a statistics based probabilistic setting. In this paper, with several experiments, we further study the suitability of ensembles in surface reconstruction, and also apply ensembles to normal estimation. We experimented with a widely used normal estimation technique [2] and Multi-level Partitions of Unity implicits for surface reconstruction [3], showing that normal and surface ensembles can successfully be combined to handle noisy point sets.
Export
BibTeX
@inproceedings{Yoon-et-al_GMP06,
  TITLE     = {Ensembles for Normal and Surface Reconstructions},
  AUTHOR    = {Yoon, Mincheol and Lee, Yunjin and Lee, Seungyong and Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-540-36711-X},
  DOI       = {10.1007/11802914_2},
  LOCALID   = {Local-ID: C125675300671F7B-1FFB74D454097852C12572980033D598-SeidelYLLI2006},
  PUBLISHER = {Springer},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {The majority of the existing techniques for surface reconstruction and the <br>closely related problem of normal estimation are deterministic. Their main <br>advantages are the speed and, given a reasonably good initial input, the high <br>quality of the reconstructed surfaces. Nevertheless, their deterministic nature <br>may hinder them from effectively handling incomplete data with noise and <br>outliers. In our previous work [1], we applied a statistical technique, called <br>ensembles, to the problem of surface reconstruction. We showed that an ensemble <br>can improve the performance of a deterministic algorithm by putting it into a <br>statistics based probabilistic setting. In this paper, with several <br>experiments, we further study the suitability of ensembles in surface <br>reconstruction, and also apply ensembles to normal estimation. We experimented <br>with a widely used normal estimation technique [2] and Multi-level Partitions <br>of Unity implicits for surface reconstruction [3], showing that normal and <br>surface ensembles can successfully be combined to handle noisy point sets.},
  BOOKTITLE = {Geometric Modeling and Processing -- GMP 2006},
  EDITOR    = {Kim, Myung-Soo and Shimada, Kenji},
  PAGES     = {17--33},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4077},
  ADDRESS   = {Pittsburgh, PA, USA},
}
Endnote
%0 Conference Proceedings %A Yoon, Mincheol %A Lee, Yunjin %A Lee, Seungyong %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Ensembles for Normal and Surface Reconstructions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-22B0-0 %F EDOC: 314602 %F OTHER: Local-ID: C125675300671F7B-1FFB74D454097852C12572980033D598-SeidelYLLI2006 %R 10.1007/11802914_2 %D 2006 %B 4th International Conference on Geometric Modeling and Processing %Z date of event: 2006-07-26 - 2006-07-28 %C Pittsburgh, PA, USA %X The majority of the existing techniques for surface reconstruction and the <br>closely related problem of normal estimation are deterministic. Their main <br>advantages are the speed and, given a reasonably good initial input, the high <br>quality of the reconstructed surfaces. Nevertheless, their deterministic nature <br>may hinder them from effectively handling incomplete data with noise and <br>outliers. In our previous work [1], we applied a statistical technique, called <br>ensembles, to the problem of surface reconstruction. We showed that an ensemble <br>can improve the performance of a deterministic algorithm by putting it into a <br>statistics based probabilistic setting. In this paper, with several <br>experiments, we further study the suitability of ensembles in surface <br>reconstruction, and also apply ensembles to normal estimation. We experimented <br>with a widely used normal estimation technique [2] and Multi-level Partitions <br>of Unity implicits for surface reconstruction [3], showing that normal and <br>surface ensembles can successfully be combined to handle noisy point sets. 
%B Geometric Modeling and Processing - GMP 2006 %E Kim, Myung-Soo; Shimada, Kenji %P 17 - 33 %I Springer %@ 3-540-36711-X %B Lecture Notes in Computer Science %N 4077 %U https://rdcu.be/dHR7Y
Yamauchi, H., Saleem, W., Yoshizawa, S., Karni, Z., Belyaev, A., and Seidel, H.-P. 2006. Towards Stable and Salient Multi-view Representation of 3D Shapes. IEEE International Conference on Shape Modeling and Applications 2006 (SMI 2006), IEEE.
Abstract
An approach to automatically select stable and salient representative views of a given 3D object is proposed. Initially, a set of viewpoints are uniformly sampled from the surface of a bounding sphere. The sampled viewpoints are connected to their closest points to form a spherical graph in which each edge is weighted by a similarity measure between the two views from its incident vertices. Partitions of similar views are obtained using a graph partitioning procedure and their ``centroids'' are considered to be their representative views. Finally, the views are ranked based on a saliency measure to form the object's representative views. This leads to a compact, human-oriented 2D description of a 3D object, and as such, is useful both for traditional applications like presentation and analysis of 3D shapes, and for emerging ones like indexing and retrieval in large shape repositories.
Export
BibTeX
@inproceedings{Yamauchi-et-al_SMI05.2,
  TITLE     = {Towards Stable and Salient Multi-view Representation of {3D} Shapes},
  AUTHOR    = {Yamauchi, Hitoshi and Saleem, Waqar and Yoshizawa, Shin and Karni, Zachi and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2591-1},
  DOI       = {10.1109/SMI.2006.42},
  LOCALID   = {Local-ID: C125675300671F7B-7F23BCE91591143EC125712D0061624A-bib:Yamauchi:SMI:2005},
  PUBLISHER = {IEEE},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {An approach to automatically select stable and salient representative views of <br>a given 3D object is proposed. Initially, a set of viewpoints are uniformly <br>sampled from the surface of a bounding sphere. The sampled viewpoints are <br>connected to their closest points to form a spherical graph in which each edge <br>is weighted by a similarity measure between the two views from its incident <br>vertices. Partitions of similar views are obtained using a graph partitioning <br>procedure and their ``centroids'' are considered to be their representative <br>views. Finally, the views are ranked based on a saliency measure to form the <br>object's representative views. This leads to a compact, human-oriented 2D <br>description of a 3D object, and as such, is useful both for traditional <br>applications like presentation and analysis of 3D<br> shapes, and for emerging ones like indexing and retrieval in large shape <br>repositories.},
  BOOKTITLE = {IEEE International Conference on Shape Modeling and Applications 2006 (SMI 2006)},
  EDITOR    = {Spagnuolo, M. and Belyaev, A. and Suzuki, H.},
  PAGES     = {265--270},
  ADDRESS   = {Matsushima, Japan},
}
Endnote
%0 Conference Proceedings %A Yamauchi, Hitoshi %A Saleem, Waqar %A Yoshizawa, Shin %A Karni, Zachi %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards Stable and Salient Multi-view Representation of 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-244E-F %F EDOC: 314372 %F OTHER: Local-ID: C125675300671F7B-7F23BCE91591143EC125712D0061624A-bib:Yamauchi:SMI:2005 %R 10.1109/SMI.2006.42 %D 2006 %B IEEE International Conference on Shape Modeling and Applications 2006 %Z date of event: 2006-06-14 - 2006-06-16 %C Matsushima, Japan %X An approach to automatically select stable and salient representative views of <br>a given 3D object is proposed. Initially, a set of viewpoints are uniformly <br>sampled from the surface of a bounding sphere. The sampled viewpoints are <br>connected to their closest points to form a spherical graph in which each edge <br>is weighted by a similarity measure between the two views from its incident <br>vertices. Partitions of similar views are obtained using a graph partitioning <br>procedure and their ``centroids'' are considered to be their representative <br>views. Finally, the views are ranked based on a saliency measure to form the <br>object's representative views. This leads to a compact, human-oriented 2D <br>description of a 3D object, and as such, is useful both for traditional <br>applications like presentation and analysis of 3D<br> shapes, and for emerging ones like indexing and retrieval in large shape <br>repositories. %B IEEE International Conference on Shape Modeling and Applications 2006 %E Spagnuolo, M.; Belyaev, A.; Suzuki, H. 
%P 265 - 270 %I IEEE %@ 0-7695-2591-1
Weinkauf, T., Theisel, H., Hege, H.-C., and Seidel, H.-P. 2006. Topological Structures in Two-Parameter-Dependent 2D Vector Fields. Computer Graphics Forum (Proc. EG 2006), Blackwell.
Abstract
In this paper we extract and visualize the topological skeleton of two-parameter-dependent vector fields. This kind of vector data depends on two parameter dimensions, for instance physical time and a scale parameter. We show that two important classes of local bifurcations – fold and Hopf bifurcations – build line structures for which we present an approach to extract them. Furthermore we show that new kinds of structurally stable local bifurcations exist for this data, namely fold-fold and Hopf-fold bifurcations. We present a complete classification of them. We apply our topological extraction method to analyze a number of two-parameter-dependent vector fields with different physical interpretations of the two additional dimensions.
Export
BibTeX
@inproceedings{Weinkauf-et-al_EG06,
  TITLE     = {Topological Structures in Two-Parameter-Dependent {2D} Vector Fields},
  AUTHOR    = {Weinkauf, Tino and Theisel, Holger and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2006.00980.x},
  LOCALID   = {Local-ID: C125675300671F7B-C7A50DDD67376FFAC1257235005E9689-Theisel2006EG},
  PUBLISHER = {Blackwell},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {In this paper we extract and visualize the topological skeleton of two-parameter-dependent vector fields. This kind of vector data depends on two parameter dimensions, for instance physical time and a scale parameter. We show that two important classes of local bifurcations -- fold and Hopf bifurcations -- build line structures for which we present an approach to extract them. Furthermore we show that new kinds of structurally stable local bifurcations exist for this data, namely fold-fold and Hopf-fold bifurcations. We present a complete classification of them. We apply our topological extraction method to analyze a number of two-parameter-dependent vector fields with different physical interpretations of the two additional dimensions.},
  BOOKTITLE = {EUROGRAPHICS 2006 (EG 2006)},
  EDITOR    = {Szirmay-Kalos, L{\'a}szl{\'o} and Gr{\"o}ller, Eduard},
  PAGES     = {607--616},
  JOURNAL   = {Computer Graphics Forum (Proc. EG)},
  VOLUME    = {25},
  ISSUE     = {3},
  ADDRESS   = {Vienna, Austria},
}
Endnote
%0 Conference Proceedings %A Weinkauf, Tino %A Theisel, Holger %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Topological Structures in Two-Parameter-Dependent 2D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2443-5 %F EDOC: 314631 %F OTHER: Local-ID: C125675300671F7B-C7A50DDD67376FFAC1257235005E9689-Theisel2006EG %R 10.1111/j.1467-8659.2006.00980.x %D 2006 %B Eurographics 2006 %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X In this paper we extract and visualize the topological skeleton of <br>two-parameter-dependent vector fields. This kind of vector data depends on two <br>parameter dimensions, for instance physical time and a scale parameter. We show <br>that two important classes of local bifurcations &#8211; fold and Hopf bifurcations &#8211; <br>build line structures for which we present an approach to extract them. <br>Furthermore we show that new kinds of structurally stable local bifurcations <br>exist for this data, namely fold-fold and Hopf-fold bifurcations. We present a <br>complete classification of them. We apply our topological extraction method to <br>analyze a number of two-parameter-dependent vector fields with different <br>physical interpretations of the two additional dimensions. %B EUROGRAPHICS 2006 %E Szirmay-Kalos, L&#225;szl&#243;; Gr&#246;ller, Eduard %P 607 - 616 %I Blackwell %J Computer Graphics Forum %V 25 %N 3 %I Blackwell-Wiley %@ false
Wald, I., Dietrich, A., Benthin, C., et al. 2006. A Ray Tracing based Framework for High-Quality Virtual Reality in Industrial Design Applications. Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing, IEEE.
Abstract
Computer aided design (CAD) and virtual reality (VR) are becoming increasingly important tools for industrial design applications. Unfortunately, there is a huge and growing gap between what data CAD engineers are working on, what rendering quality is needed by designers and executives to faithfully judge a design variant, and what rendering capabilities are offered by commonly available VR frameworks. In particular, existing VR systems cannot currently cope with the accuracy demanded by CAD engineers, nor can they deliver the photo-realistic rendering quality and reliability required by designers and decision makers. In this paper, we describe a ray tracing based virtual reality framework that closes these gaps. In particular, the proposed system supports direct ray tracing of trimmed freeform surfaces even for complex models of thousands of patches, allows for accurately simulating reflections and refraction for glass and car paint effects, offers support for direct integration of measured materials via bidirectional texture functions, and even allows for soft environmental lighting from high dynamic range environment maps. All of these effects can be delivered interactively, and are demonstrated on a real-world industrial model, a complete Mercedes C-Class car.
Export
BibTeX
@inproceedings{wald:06:ART,
  TITLE     = {A Ray Tracing based Framework for High-Quality Virtual Reality in Industrial Design Applications},
  AUTHOR    = {Wald, Ingo and Dietrich, Andreas and Benthin, Carsten and Efremov, Alexander and Dahmen, Tim and G{\"u}nther, Johannes and Havran, Vlastimil and Seidel, Hans-Peter and Slusallek, Philipp},
  LANGUAGE  = {eng},
  ISBN      = {1-4244-0693-5},
  LOCALID   = {Local-ID: C125675300671F7B-4EC5DA4BA6A4D3DFC125722D0034421D-wald:06:ART},
  PUBLISHER = {IEEE},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Computer aided design (CAD) and virtual reality (VR) are becoming increasingly important tools for industrial design applications. Unfortunately, there is a huge and growing gap between what data CAD engineers are working on, what rendering quality is needed by designers and executives to faithfully judge a design variant, and what rendering capabilities are offered by commonly available VR frameworks. In particular, existing VR systems cannot currently cope with the accuracy demanded by CAD engineers, nor can they deliver the photo-realistic rendering quality and reliability required by designers and decision makers. In this paper, we describe a ray tracing based virtual reality framework that closes these gaps. In particular, the proposed system supports direct ray tracing of trimmed freeform surfaces even for complex models of thousands of patches, allows for accurately simulating reflections and refraction for glass and car paint effects, offers support for direct integration of measured materials via bidirectional texture functions, and even allows for soft environmental lighting from high dynamic range environment maps. All of these effects can be delivered interactively, and are demonstrated on a real-world industrial model, a complete Mercedes C-Class car.},
  BOOKTITLE = {Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing},
  EDITOR    = {Wald, Ingo and Parker, Steven G.},
  PAGES     = {177--185},
  ADDRESS   = {Salt Lake City, USA},
}
Endnote
%0 Conference Proceedings %A Wald, Ingo %A Dietrich, Andreas %A Benthin, Carsten %A Efremov, Alexander %A Dahmen, Tim %A G&#252;nther, Johannes %A Havran, Vlastimil %A Seidel, Hans-Peter %A Slusallek, Philipp %E Wald, Ingo %E Parker, Steven G. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Ray Tracing based Framework for High-Quality Virtual Reality in Industrial Design Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-221C-D %F EDOC: 314625 %F OTHER: Local-ID: C125675300671F7B-4EC5DA4BA6A4D3DFC125722D0034421D-wald:06:ART %I IEEE %D 2006 %B Untitled Event %Z date of event: 2006-09-18 - %C Salt Lake City, USA %X Computer aided design (CAD) and virtual reality (VR) are becoming increasingly important tools for industrial design applications. Unfortunately, there is a huge and growing gap between what data CAD engineers are working on, what rendering quality is needed by designers and executives to faithfully judge a design variant, and what rendering capabilities are offered by commonly available VR frameworks. In particular, existing VR systems cannot currently cope with the accuracy demanded by CAD engineers, nor can they deliver the photo-realistic rendering quality and reliability required by designers and decision makers. In this paper, we describe a ray tracing based virtual reality framework that closes these gaps. 
In particular, the proposed system supports direct ray tracing of trimmed freeform surfaces even for complex models of thousands of patches, allows for accurately simulating reflections and refraction for glass and car paint effects, offers support for direct integration of measured materials via bidirectional texture functions, and even allows for soft environmental lighting from high dynamic range environment maps. All of these effects can be delivered interactively, and are demonstrated on a real-world industrial model, a complete Mercedes C-Class car. %B Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing %P 177 - 185 %I IEEE %@ 1-4244-0693-5
Von Funck, W., Theisel, H., and Seidel, H.-P. 2006a. Shape Matching Based on Fully Automatic Face Detection on Triangular Meshes. Advances in Computer Graphics (CGI 2006), Springer.
Abstract
This paper tackles a particular shape matching problem: given a data base of shapes (described as triangular meshes), we search for all shapes which describe a human. We do so by applying a 3D face detection approach on the mesh which consists of three steps: first, a local symmetry value is computed for each vertex. Then, the symmetry values in a certain neighborhood of each vertex are analyzed for building sharp symmetry lines. Finally, the geometry around each vertex is analyzed to get further facial features like nose and forehead. We tested our approach with several shape data bases (e.g. the Princeton Shape Benchmark) and achieved high rates of correct face detection.
Export
BibTeX
% NOTE(review): stripped literal HTML "<br>" residue from ABSTRACT (invalid in LaTeX output); reformatted one field per line. All other field values unchanged.
@inproceedings{Theisel-et-al_CGI06,
  TITLE = {Shape Matching Based on Fully Automatic Face Detection on Triangular Meshes},
  AUTHOR = {von Funck, Wolfram and Theisel, Holger and Seidel, Hans-Peter},
  EDITOR = {Nishita, Tomoyuki and Peng, Qunsheng and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISBN = {3-540-35638-X},
  DOI = {10.1007/11784203_21},
  LOCALID = {Local-ID: C125675300671F7B-4F6EB0193C1D335CC125722E003A91B1-vonFunck2006a},
  PUBLISHER = {Springer},
  YEAR = {2006},
  DATE = {2006},
  ABSTRACT = {This paper tackles a particular shape matching problem: given a data base of shapes (described as triangular meshes), we search for all shapes which describe a human. We do so by applying a 3D face detection approach on the mesh which consists of three steps: first, a local symmetry value is computed for each vertex. Then, the symmetry values in a certain neighborhood of each vertex are analyzed for building sharp symmetry lines. Finally, the geometry around each vertex is analyzed to get further facial features like nose and forehead. We tested our approach with several shape data bases (e.g. the Princeton Shape Benchmark) and achieved high rates of correct face detection.},
  BOOKTITLE = {Advances in Computer Graphics (CGI 2006)},
  PAGES = {242--253},
  SERIES = {Lecture Notes in Computer Science},
  VOLUME = {4035},
  ADDRESS = {Hangzhou, China},
}
Endnote
%0 Conference Proceedings %A von Funck, Wolfram %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shape Matching Based on Fully Automatic Face Detection on Triangular Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23EE-D %F EDOC: 314496 %F OTHER: Local-ID: C125675300671F7B-4F6EB0193C1D335CC125722E003A91B1-vonFunck2006a %R 10.1007/11784203_21 %D 2006 %B 24th Computer Graphics International Conference %Z date of event: 2006-06-26 - 2006-06-28 %C Hangzhou, China %X This paper tackles a particular shape matching problem: given a data base of <br>shapes (described as triangular meshes), we search for all shapes which <br>describe a human. We do so by applying a 3D face detection approach on the mesh <br>which consists of three steps: first, a local symmetry value is computed for <br>each vertex. Then, the symmetry values in a certain neighborhood of each vertex <br>are analyzed for building sharp symmetry lines. Finally, the geometry around <br>each vertex is analyzed to get further facial features like nose and forehead. <br>We tested our approach with several shape data bases (e.g. the Princeton Shape <br>Benchmark) and achieved high rates of correct face detection. %B Advances in Computer Graphics %E Nishita, Tomoyuki; Peng, Qunsheng; Seidel, Hans-Peter %P 242 - 253 %I Springer %@ 3-540-35638-X %B Lecture Notes in Computer Science %N 4035 %U https://rdcu.be/dHTs9
Von Funck, W., Theisel, H., and Seidel, H.-P. 2006b. Vector Field Based Shape Deformations. ACM Transactions on Graphics, ACM.
Abstract
We present an approach to define shape deformations by constructing and interactively modifying C1 continuous time-dependent divergence-free vector fields. The deformation is obtained by a path line integration of the mesh vertices. This way, the deformation is volume-preserving, free of (local and global) self-intersections, feature preserving, smoothness preserving, and local. Different modeling metaphors support the approach which is able to modify the vector field on-the-fly according to the user input. The approach works at interactive frame rates for moderate mesh sizes, and the numerical integration preserves the volume with a high accuracy.
Export
BibTeX
% NOTE(review): removed duplicate PUBLISHER field (kept {ACM}, matching the file's other ACM entries); stripped literal HTML "<br>" residue from ABSTRACT. All other field values unchanged.
@inproceedings{Theisel-et-al_SIGGRAPH06,
  TITLE = {Vector Field Based Shape Deformations},
  AUTHOR = {von Funck, Wolfram and Theisel, Holger and Seidel, Hans-Peter},
  EDITOR = {Dorsey, Julie},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/1141911.1142002},
  LOCALID = {Local-ID: C125675300671F7B-8A7FFA26D12F92CBC125722E0037DC71-vonFunck2006},
  PUBLISHER = {ACM},
  YEAR = {2006},
  DATE = {2006},
  ABSTRACT = {We present an approach to define shape deformations by constructing and interactively modifying C1 continuous time-dependent divergence-free vector fields. The deformation is obtained by a path line integration of the mesh vertices. This way, the deformation is volume-preserving, free of (local and global) self-intersections, feature preserving, smoothness preserving, and local. Different modeling metaphors support the approach which is able to modify the vector field on-the-fly according to the user input. The approach works at interactive frame rates for moderate mesh sizes, and the numerical integration preserves the volume with a high accuracy.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2006},
  PAGES = {1118--1125},
  JOURNAL = {ACM Transactions on Graphics},
  VOLUME = {25},
  ISSUE = {3},
  ADDRESS = {Boston, MA, USA},
}
Endnote
%0 Conference Proceedings %A von Funck, Wolfram %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Vector Field Based Shape Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2470-0 %F EDOC: 314632 %F OTHER: Local-ID: C125675300671F7B-8A7FFA26D12F92CBC125722E0037DC71-vonFunck2006 %R 10.1145/1141911.1142002 %D 2006 %B SIGGRAPH 2006: 33rd Annual Conference on Computer Graphics and Interactive Techniques %Z date of event: 2006-07-30 - 2006-08-03 %C Boston, MA, USA %X We present an approach to define shape deformations by constructing and <br>interactively modifying C1 continuous time-dependent divergence-free vector <br>fields. The deformation is obtained by a path line integration of the mesh <br>vertices. This way, the deformation is volume-preserving, free of (local and <br>global) self-intersections, feature preserving, smoothness preserving, and <br>local. Different modeling metaphors support the approach which is able to <br>modify the vector field on-the-fly according to the user input. The approach <br>works at interactive frame rates for moderate mesh sizes, and the numerical <br>integration preserves the volume with a high accuracy. %B Proceedings of ACM SIGGRAPH 2006 %E Dorsey, Julie %P 1118 - 1125 %I ACM %J ACM Transactions on Graphics %V 25 %N 3 %I Association for Computing Machinery %@ false
Theobalt, C., Ahmed, N., Lensch, H.P.A., Magnor, M.A., and Seidel, H.-P. 2006. Enhanced dynamic reflectometry for relightable free-viewpoint video. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Free-Viewpoint Video of Human Actors allows photo- realistic rendering of real-world people under novel viewing conditions. Dynamic Reflectometry extends the concept of free-view point video and allows rendering in addition under novel lighting conditions. In this work, we present an enhanced method for capturing human shape and motion as well as dynamic surface reflectance properties from a sparse set of input video streams. We augment our initial method for model-based relightable free-viewpoint video in several ways. Firstly, a single-skin mesh is introduced for the continuous appearance of the model. Moreover an algorithm to detect and compensate lateral shifting of textiles in order to improve temporal texture registration is presented. Finally, a structured resampling approach is introduced which enables reliable estimation of spatially varying surface reflectance despite a static recording setup. The new algorithm ingredients along with the Relightable 3D Video framework enables us to realistically reproduce the appearance of animated virtual actors under different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. Our contribution can be used to create 3D renditions of real-world people under arbitrary novel lighting conditions on standard graphics hardware.
Export
BibTeX
% NOTE(review): TYPE field contained the HTML entity "&#252;" while INSTITUTION in the same entry correctly uses {\"u} -- normalized to the BibTeX special character. Also repaired extraction artifacts in ABSTRACT ("photo- realistic" -> "photo-realistic", "free-view point" -> "free-viewpoint", consistent with the entry title). All other field values unchanged.
@techreport{TheobaltAhmedLenschMagnorSeidel2006,
  TITLE = {Enhanced dynamic reflectometry for relightable free-viewpoint video},
  AUTHOR = {Theobalt, Christian and Ahmed, Naveed and Lensch, Hendrik P. A. and Magnor, Marcus A. and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-006},
  NUMBER = {MPI-I-2006-4-006},
  TYPE = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2006},
  DATE = {2006},
  ABSTRACT = {Free-Viewpoint Video of Human Actors allows photo-realistic rendering of real-world people under novel viewing conditions. Dynamic Reflectometry extends the concept of free-viewpoint video and allows rendering in addition under novel lighting conditions. In this work, we present an enhanced method for capturing human shape and motion as well as dynamic surface reflectance properties from a sparse set of input video streams. We augment our initial method for model-based relightable free-viewpoint video in several ways. Firstly, a single-skin mesh is introduced for the continuous appearance of the model. Moreover an algorithm to detect and compensate lateral shifting of textiles in order to improve temporal texture registration is presented. Finally, a structured resampling approach is introduced which enables reliable estimation of spatially varying surface reflectance despite a static recording setup. The new algorithm ingredients along with the Relightable 3D Video framework enables us to realistically reproduce the appearance of animated virtual actors under different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. Our contribution can be used to create 3D renditions of real-world people under arbitrary novel lighting conditions on standard graphics hardware.},
}
Endnote
%0 Report %A Theobalt, Christian %A Ahmed, Naveed %A Lensch, Hendrik P. A. %A Magnor, Marcus A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Enhanced dynamic reflectometry for relightable free-viewpoint video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-67F4-B %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-006 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 37 p. %X Free-Viewpoint Video of Human Actors allows photo- realistic rendering of real-world people under novel viewing conditions. Dynamic Reflectometry extends the concept of free-view point video and allows rendering in addition under novel lighting conditions. In this work, we present an enhanced method for capturing human shape and motion as well as dynamic surface reflectance properties from a sparse set of input video streams. We augment our initial method for model-based relightable free-viewpoint video in several ways. Firstly, a single-skin mesh is introduced for the continuous appearance of the model. Moreover an algorithm to detect and compensate lateral shifting of textiles in order to improve temporal texture registration is presented. Finally, a structured resampling approach is introduced which enables reliable estimation of spatially varying surface reflectance despite a static recording setup. The new algorithm ingredients along with the Relightable 3D Video framework enables us to realistically reproduce the appearance of animated virtual actors under different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. 
Our contribution can be used to create 3D renditions of real-world people under arbitrary novel lighting conditions on standard graphics hardware. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Stoll, C., Karni, Z., and Seidel, H.-P. 2006a. Geodesics Guided Constrained Texture Deformation. The 14th Pacific Conference on Computer Graphics and Applications Proceedings, National Taiwan University Press.
Abstract
We present a method that deforms an image plane to visually meet the shape and pose of a manifold surface. The user provides constraints that couple a small number of surface points with their corresponding image pixels to initially deform the plane. Matching, based on geodesic distances, couples additional points, followed by a second deformation that brings the image plane into its final pose and shape. The method works on any type of surface that supports geodesic distances evaluation. This includes not-triangulated and high genus models with arbitrary topology. The result is a smooth, visually pleasing and realistic textured surface that can be superimposed onto or used instead of the original model and with some limitations can be considered as a parameterization or remeshing method for the area of interest.
Export
BibTeX
% NOTE(review): repaired PDF-extraction hyphenation artifacts in ABSTRACT ("sur-face", "im-age", "tex-tured"); added ADDRESS = {Taipei, Taiwan} taken from the corresponding Endnote record (%C Taipei, Taiwan). All other field values unchanged.
@inproceedings{StollPG06,
  TITLE = {Geodesics Guided Constrained Texture Deformation},
  AUTHOR = {Stoll, Carsten and Karni, Zachi and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  LOCALID = {Local-ID: C125675300671F7B-DCC1795F1119D770C1257209004D1D9E-StollPG06},
  PUBLISHER = {National Taiwan University Press},
  YEAR = {2006},
  DATE = {2006},
  ABSTRACT = {We present a method that deforms an image plane to visually meet the shape and pose of a manifold surface. The user provides constraints that couple a small number of surface points with their corresponding image pixels to initially deform the plane. Matching, based on geodesic distances, couples additional points, followed by a second deformation that brings the image plane into its final pose and shape. The method works on any type of surface that supports geodesic distances evaluation. This includes not-triangulated and high genus models with arbitrary topology. The result is a smooth, visually pleasing and realistic textured surface that can be superimposed onto or used instead of the original model and with some limitations can be considered as a parameterization or remeshing method for the area of interest.},
  BOOKTITLE = {The 14th Pacific Conference on Computer Graphics and Applications Proceedings},
  PAGES = {144--152},
  SERIES = {Pacific Conference on Computer Graphics and Applications Proceedings},
  ADDRESS = {Taipei, Taiwan},
}
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Karni, Zachi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Geodesics Guided Constrained Texture Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-22FC-6 %F EDOC: 314611 %F OTHER: Local-ID: C125675300671F7B-DCC1795F1119D770C1257209004D1D9E-StollPG06 %I National Taiwan University Press %D 2006 %B The 14th Pacific Conference on Computer Graphics and Applications %Z date of event: 2006-10-11 - %C Taipei, Taiwan %X We present a method that deforms an image plane to visually meet the shape and pose of a manifold surface. The user provides constraints that couple a small number of surface points with their corresponding image pixels to initially deform the plane. Matching, based on geodesic distances, couples additional points, followed by a second deformation that brings the image plane into its final pose and shape. The method works on any type of surface that supports geodesic distances evaluation. This includes not-triangulated and high genus models with arbitrary topology. The result is a smooth, visually pleasing and realistic textured surface that can be superimposed onto or used instead of the original model and with some limitations can be considered as a parameterization or remeshing method for the area of interest. %B The 14th Pacific Conference on Computer Graphics and Applications Proceedings %P 144 - 152 %I National Taiwan University Press %B Pacific Conference on Computer Graphics and Applications Proceedings
Stoll, C., Karni, Z., Rössl, C., Yamauchi, H., and Seidel, H.-P. 2006b. Template Deformation for Point Cloud Fitting. Proceedings of the 3rd Symposium on Point-Based Graphics, Eurographics.
Export
BibTeX
% Conference paper at the 3rd Symposium on Point-Based Graphics (Boston, 2006).
@inproceedings{Stoll-et-al_SPBG06,
  author    = {Stoll, Carsten and Karni, Zachi and R{\"o}ssl, Christian and Yamauchi, Hitoshi and Seidel, Hans-Peter},
  title     = {Template Deformation for Point Cloud Fitting},
  booktitle = {Proceedings of the 3rd Symposium on Point-Based Graphics},
  editor    = {Botsch, Mario and Chen, Baoquan},
  pages     = {27--35},
  publisher = {Eurographics},
  address   = {Boston, USA},
  year      = {2006},
  date      = {2006},
  doi       = {10.2312/SPBG/SPBG06/027-035},
  language  = {eng},
  localid   = {Local-ID: C125675300671F7B-B080968C4D51F338C125719400533800-stoll:td:2006},
}
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Karni, Zachi %A R&#246;ssl, Christian %A Yamauchi, Hitoshi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Template Deformation for Point Cloud Fitting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2414-D %F EDOC: 314469 %F OTHER: Local-ID: C125675300671F7B-B080968C4D51F338C125719400533800-stoll:td:2006 %R 10.2312/SPBG/SPBG06/027-035 %D 2006 %B 3rd Symposium on Point-Based Graphics %Z date of event: 2006-07-29 - 2006-07-30 %C Boston, USA %B Proceedings of the 3rd Symposium on Point-Based Graphics %E Botsch, Mario; Chen, Baoquan %P 27 - 35 %I Eurographics
Stoll, C., Seidel, H.-P., and Alexa, M. 2006c. BSP Shapes. 2006 International Conference on Shape Modeling and Applications (SMI 2006), IEEE.
Abstract
We discuss a shape representation based on a set of disconnected (planar) polygons. The polygons are computed by creating a BSP that contains approximately linear surface patches in each cell. This is achieved by employing two heuristics for finding appropriate split planes in each cell. Leaf nodes in the BSP tree represent either polygonal surface approximations or empty (clip) cells rather than split planes. We show that the resulting set of disconnected primitives typically leads to a better two-sided Hausdorff error for a given number of primitives than meshes. The BSP cells can be coded with few bits and, consequently, the tree is a compact shape representation. The special properties of BSPs are very useful in applications that need to perform spatial queries on the primitives, such as for occlusion and view frustum culling, and proximity or collision tests.
Export
BibTeX
% NOTE(review): stripped literal HTML "<br>" residue from ABSTRACT (each replaced by a single space; invalid in LaTeX output). All other field values unchanged.
@inproceedings{Stoll-et-al_SMI06,
  TITLE = {{BSP} Shapes},
  AUTHOR = {Stoll, Carsten and Seidel, Hans-Peter and Alexa, Marc},
  LANGUAGE = {eng},
  ISBN = {0-7695-2591-1},
  DOI = {10.1109/SMI.2006.5},
  LOCALID = {Local-ID: C125675300671F7B-D27A3D53A6DF3879C12571DF004E6CC7-Stoll2006bs},
  PUBLISHER = {IEEE},
  YEAR = {2006},
  DATE = {2006},
  ABSTRACT = {We discuss a shape representation based on a set of disconnected (planar) polygons. The polygons are computed by creating a BSP that contains approximately linear surface patches in each cell. This is achieved by employing two heuristics for finding appropriate split planes in each cell. Leaf nodes in the BSP tree represent either polygonal surface approximations or empty (clip) cells rather than split planes. We show that the resulting set of disconnected primitives typically leads to a better two-sided Hausdorff error for a given number of primitives than meshes. The BSP cells can be coded with few bits and, consequently, the tree is a compact shape representation. The special properties of BSPs are very useful in applications that need to perform spatial queries on the primitives, such as for occlusion and view frustum culling, and proximity or collision tests.},
  BOOKTITLE = {2006 International Conference on Shape Modeling and Applications (SMI 2006)},
  PAGES = {42--47},
  ADDRESS = {Matsushima, Japan},
}
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Seidel, Hans-Peter %A Alexa, Marc %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T BSP Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2248-9 %F EDOC: 314385 %F OTHER: Local-ID: C125675300671F7B-D27A3D53A6DF3879C12571DF004E6CC7-Stoll2006bs %R 10.1109/SMI.2006.5 %D 2006 %B 2006 International Conference on Shape Modeling and Applications %Z date of event: 2006-06-14 - 2006-06-16 %C Matsushima, Japan %X We discuss a shape representation based on a set of disconnected<br>(planar) polygons. The polygons are computed<br>by creating a BSP that contains approximately linear surface<br>patches in each cell. This is achieved by employing two<br>heuristics for finding appropriate split planes in each cell.<br>Leaf nodes in the BSP tree represent either polygonal surface<br>approximations or empty (clip) cells rather than split<br>planes. We show that the resulting set of disconnected primitives<br>typically leads to a better two-sided Hausdorff error<br>for a given number of primitives than meshes. The BSP<br>cells can be coded with few bits and, consequently, the tree<br>is a compact shape representation. The special properties<br>of BSPs are very useful in applications that need to perform<br>spatial queries on the primitives, such as for occlusion and<br>view frustum culling, and proximity or collision tests. %B 2006 International Conference on Shape Modeling and Applications %P 42 - 47 %I IEEE %@ 0-7695-2591-1
Stoll, C., Gumhold, S., and Seidel, H.-P. 2006d. Incremental Raycasting of Piecewise Quadratic Surfaces on the GPU. IEEE Symposium on Interactive Raytracing 2006 Proceedings, IEEE.
Abstract
To overcome the limitations of triangle and point based surfaces several authors have recently investigated surface representations that are based on higher order primitives. Among these are MPU, SLIM surfaces, dynamic skin surfaces and higher order isosurfaces. Up to now these representations were not suitable for interactive applications because of the lack of an efficient rendering algorithm. In this paper we close this gap for implicit surface representations of degree two by developing highly optimized GPU implementations of the raycasting algorithm. We investigate techniques for fast incremental raycasting and cover per fragment and per quadric backface culling. We apply the approaches to the rendering of SLIM surfaces, quadratic iso-surfaces over tetrahedral meshes and bilinear quadrilaterals. Compared to triangle based surface approximations of similar geometric error we achieve only slightly lower frame rates but with much higher visual quality due to the quadratic approximation power of the underlying surfaces.
Export
BibTeX
% NOTE(review): corrected editor surname "Parke" -> "Parker" (the same 2006 IEEE Interactive Ray Tracing symposium lists editors "Wald, Ingo" and "Parker, Steven G." elsewhere in this file); added ADDRESS = {Salt Lake City, USA} taken from the corresponding Endnote record (%C). All other field values unchanged.
@inproceedings{StollRT06,
  TITLE = {Incremental Raycasting of Piecewise Quadratic Surfaces on the {GPU}},
  AUTHOR = {Stoll, Carsten and Gumhold, Stefan and Seidel, Hans-Peter},
  EDITOR = {Wald, Ingo and Parker, Steven G.},
  LANGUAGE = {eng},
  ISBN = {1-4244-0693-5},
  LOCALID = {Local-ID: C125675300671F7B-570FDF134DD31F9BC1257209004E68E2-StollRT06},
  PUBLISHER = {IEEE},
  YEAR = {2006},
  DATE = {2006},
  ABSTRACT = {To overcome the limitations of triangle and point based surfaces several authors have recently investigated surface representations that are based on higher order primitives. Among these are MPU, SLIM surfaces, dynamic skin surfaces and higher order isosurfaces. Up to now these representations were not suitable for interactive applications because of the lack of an efficient rendering algorithm. In this paper we close this gap for implicit surface representations of degree two by developing highly optimized GPU implementations of the raycasting algorithm. We investigate techniques for fast incremental raycasting and cover per fragment and per quadric backface culling. We apply the approaches to the rendering of SLIM surfaces, quadratic iso-surfaces over tetrahedral meshes and bilinear quadrilaterals. Compared to triangle based surface approximations of similar geometric error we achieve only slightly lower frame rates but with much higher visual quality due to the quadratic approximation power of the underlying surfaces.},
  BOOKTITLE = {IEEE Symposium on Interactive Raytracing 2006 Proceedings},
  PAGES = {141--150},
  SERIES = {IEEE Symposium on Interactive Raytracing Proceedings},
  ADDRESS = {Salt Lake City, USA},
}
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Gumhold, Stefan %A Seidel, Hans-Peter %E Wald, Ingo %E Parker, Steven G. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Incremental Raycasting of Piecewise Quadratic Surfaces on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2333-F %F EDOC: 314543 %F OTHER: Local-ID: C125675300671F7B-570FDF134DD31F9BC1257209004E68E2-StollRT06 %I IEEE %D 2006 %B IEEE Symposium on Interactive Raytracing 2006 %Z date of event: 2006-09-18 - %C Salt Lake City, USA %X To overcome the limitations of triangle and point based surfaces several authors have recently investigated surface representations that are based on higher order primitives. Among these are MPU, SLIM surfaces, dynamic skin surfaces and higher order isosurfaces. Up to now these representations were not suitable for interactive applications because of the lack of an efficient rendering algorithm. In this paper we close this gap for implicit surface representations of degree two by developing highly optimized GPU implementations of the raycasting algorithm. We investigate techniques for fast incremental raycasting and cover per fragment and per quadric backface culling. We apply the approaches to the rendering of SLIM surfaces, quadratic iso-surfaces over tetrahedral meshes and bilinear quadrilaterals. Compared to triangle based surface approximations of similar geometric error we achieve only slightly lower frame rates but with much higher visual quality due to the quadratic approximation power of the underlying surfaces. %B IEEE Symposium on Interactive Raytracing 2006 Proceedings %P 141 - 150 %I IEEE %@ 1-4244-0693-5 %B IEEE Symposium on Interactive Raytracing Proceedings
Smith, K., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2006. Beyond Tone Mapping: Enhanced Depiction of Tone Mapped HDR Images. Computer Graphics Forum, Blackwell.
Abstract
High Dynamic Range (HDR) images capture the full range of luminance present in real world scenes, and unlike Low Dynamic Range (LDR) images, can simultaneously contain detailed information in the deepest of shadows and the brightest of light sources. For display or aesthetic purposes, it is often necessary to perform tone mapping, which creates LDR depictions of HDR images at the cost of contrast information loss. The purpose of this work is two-fold: to analyze a displayed LDR image against its original HDR counterpart in terms of perceived contrast distortion, and to enhance the LDR depiction with perceptually driven colour adjustments to restore the original HDR contrast information. For analysis, we present a novel algorithm for the characterization of tone mapping distortion in terms of observed loss of global contrast, and loss of contour and texture details. We classify existing tone mapping operators accordingly. We measure both distortions with perceptual metrics that enable the automatic and meaningful enhancement of LDR depictions. For image enhancement, we identify artistic and photographic colour techniques from which we derive adjustments that create contrast with colour. The enhanced LDR image is an improved depiction of the original HDR image with restored contrast information.
Export
BibTeX
@inproceedings{Smith-et-al_EG06,
  TITLE     = {Beyond Tone Mapping: Enhanced Depiction of Tone Mapped {HDR} Images},
  AUTHOR    = {Smith, Kaleigh and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2006.00962.x},
  LOCALID   = {Local-ID: C125675300671F7B-8B783A77FDD3AB10C125722F003AF5B2-Smith2006eg},
  PUBLISHER = {Blackwell},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {High Dynamic Range (HDR) images capture the full range of luminance present in real world scenes, and unlike Low Dynamic Range (LDR) images, can simultaneously contain detailed information in the deepest of shadows and the brightest of light sources. For display or aesthetic purposes, it is often necessary to perform tone mapping, which creates LDR depictions of HDR images at the cost of contrast information loss. The purpose of this work is two-fold: to analyze a displayed LDR image against its original HDR counterpart in terms of perceived contrast distortion, and to enhance the LDR depiction with perceptually driven colour adjustments to restore the original HDR contrast information. For analysis, we present a novel algorithm for the characterization of tone mapping distortion in terms of observed loss of global contrast, and loss of contour and texture details. We classify existing tone mapping operators accordingly. We measure both distortions with perceptual metrics that enable the automatic and meaningful enhancement of LDR depictions. For image enhancement, we identify artistic and photographic colour techniques from which we derive adjustments that create contrast with colour. The enhanced LDR image is an improved depiction of the original HDR image with restored contrast information.},
  BOOKTITLE = {EUROGRAPHICS 2006 Proceedings},
  EDITOR    = {Szirmay-Kalos, L{\'a}szl{\'o} and Gr{\"o}ller, Eduard},
  PAGES     = {427--438},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {25},
  ISSUE     = {3},
  ADDRESS   = {Vienna, Austria},
}
Endnote
%0 Conference Proceedings %A Smith, Kaleigh %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Beyond Tone Mapping: Enhanced Depiction of Tone Mapped HDR Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-223F-0 %F EDOC: 314503 %F OTHER: Local-ID: C125675300671F7B-8B783A77FDD3AB10C125722F003AF5B2-Smith2006eg %R 10.1111/j.1467-8659.2006.00962.x %D 2006 %B The European Association for Computer Graphics 27th Annual Conference %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X High Dynamic Range (HDR) images capture the full range of luminance<br> present in real world scenes, and unlike Low Dynamic Range (LDR)<br> images, can simultaneously contain detailed information in the<br> deepest of shadows and the brightest of light sources. For display<br> or aesthetic purposes, it is often necessary to perform tone<br> mapping, which creates LDR depictions of HDR images at the cost of<br> contrast information loss. The purpose of this work is two-fold: to<br> analyze a displayed LDR image against its original HDR counterpart<br> in terms of perceived contrast distortion, and to enhance the LDR<br> depiction with perceptually driven colour adjustments to restore the<br> original HDR contrast information. For analysis, we present a novel<br> algorithm for the characterization of tone mapping distortion in terms<br> of observed loss of global contrast, and loss of contour and texture<br> details. We classify existing tone mapping operators accordingly.<br> We measure both distortions with perceptual metrics that enable the<br> automatic and meaningful enhancement of LDR depictions. 
For image<br> enhancement, we identify artistic and photographic colour techniques<br> from which we derive adjustments that create contrast with colour.<br> The enhanced LDR image is an improved depiction of the original HDR<br> image with restored contrast<br> information. %B EUROGRAPHICS 2006 Proceedings %E Szirmay-Kalos, L&#225;szl&#243;; Gr&#246;ller, Eduard %P 427 - 438 %I Blackwell %@ ISSN: 0167-7055 %J Computer Graphics Forum %V 25 %N 3 %I Blackwell-Wiley %@ false
Shi, K., Theisel, H., Weinkauf, T., Hauser, H., Hege, H.-C., and Seidel, H.-P. 2006. Path Line Oriented Topology for Periodic 2D Time-dependent Vector Fields. EUROVIS 2006 : Eurographics / IEEE VGTC Symposium on Visualization, Eurographics.
Abstract
This paper presents an approach to extracting a path line oriented topological segmentation for periodic 2D time-dependent vector fields. Topological methods aiming at capturing the asymptotic behavior of path lines rarely exist because path lines are usually only defined over a fixed time-interval, making statements about their asymptotic behavior impossible. For the data class of periodic vector fields, this restriction does not apply any more. Our approach detects critical path lines as well as basins from which the path lines converge to the critical ones. We demonstrate our approach on a number of test data sets.
Export
BibTeX
@inproceedings{Shi-et-al_EUROVIS06, TITLE = {Path Line Oriented Topology for Periodic {2D} Time-dependent Vector Fields}, AUTHOR = {Shi, Kuangyu and Theisel, Holger and Weinkauf, Tino and Hauser, Helwig and Hege, Hans-Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-905673-31-2}, DOI = {10.2312/VisSym/EuroVis06/139-146}, LOCALID = {Local-ID: C125675300671F7B-63EA5E084849561EC125722E00402DF9-shi06a}, PUBLISHER = {Eurographics}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {This paper presents an approach to extracting a path line oriented topological <br>segmentation for periodic 2D time-dependent vector fields. Topological methods <br>aiming in capturing the asymptotic behavior of path lines rarely exist <br>because path lines are usually only defined over a fixed <br>time-interval, making statements about their asymptotic behavior impossible. <br>For the data class of periodic vector fields, this restriction does not apply <br>any more. Our approach detects critical path <br>lines as well as basins from which the path lines converge to the critical <br>ones. We demonstrate our approach on a number of test data sets.}, BOOKTITLE = {EUROVIS 2006~:~Eurographics / IEEE VGTC Symposium on Visualization}, EDITOR = {Sousa Santos, Beatriz and Ertl, Thomas and Joy, Kenneth I. and Fellner, Dieter W. and M{\"o}ller, Torsten and Spencer, Stephen N.}, PAGES = {139--146}, ADDRESS = {Lisbon, Portugal}, }
Endnote
%0 Conference Proceedings %A Shi, Kuangyu %A Theisel, Holger %A Weinkauf, Tino %A Hauser, Helwig %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Path Line Oriented Topology for Periodic 2D Time-dependent Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23B5-D %F EDOC: 314441 %F OTHER: Local-ID: C125675300671F7B-63EA5E084849561EC125722E00402DF9-shi06a %R 10.2312/VisSym/EuroVis06/139-146 %D 2006 %B Joint Eurographics - IEEE VGTC Symposium on Visualization 2006 %Z date of event: 2006-05-08 - 2006-05-10 %C Lisbon, Portugal %X This paper presents an approach to extracting a path line oriented topological <br>segmentation for periodic 2D time-dependent vector fields. Topological methods <br>aiming in capturing the asymptotic behavior of path lines rarely exist <br>because path lines are usually only defined over a fixed <br>time-interval, making statements about their asymptotic behavior impossible. <br>For the data class of periodic vector fields, this restriction does not apply <br>any more. Our approach detects critical path <br>lines as well as basins from which the path lines converge to the critical <br>ones. We demonstrate our approach on a number of test data sets. %B EUROVIS 2006&#160;:&#160;Eurographics / IEEE VGTC Symposium on Visualization %E Sousa Santos, Beatriz; Ertl, Thomas; Joy, Kenneth I.; Fellner, Dieter W.; M&#246;ller, Torsten; Spencer, Stephen N. %P 139 - 146 %I Eurographics %@ 3-905673-31-2
Schall, O., Belyaev, A., and Seidel, H.-P. 2006a. Feature-preserving Denoising of Time-varying Range Data. SIGGRAPH ’06: ACM SIGGRAPH 2006 Sketches, ACM.
Abstract
We present a technique for accurate denoising of time-varying range data. It is inspired by the idea of similarity-based non-local image filtering and spatio-temporal bilateral filtering for video processing. We build upon both ideas and are to our knowledge the first method which extends them to time-varying geometric data. Our proposed algorithm is easy to implement, preserves fine shape features and produces an accurate and homogeneous smoothing result in the spatial and along the time domains.
Export
BibTeX
@inproceedings{Schall-et-al_SIGGRAPH06.Sketches, TITLE = {Feature-preserving Denoising of Time-varying Range Data}, AUTHOR = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-59593-364-5}, DOI = {10.1145/1179849.1179919}, LOCALID = {Local-ID: C125675300671F7B-2170AA9825D9308AC1257186004AA06A-sig06sbs}, PUBLISHER = {ACM}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {We present a technique for accurate denoising of time-varying<br>range data. It is inspired by the idea of similarity-based<br>non-local image filtering and spatio-temporal bilateral filtering<br>for video processing. We build upon both ideas and are to our<br>knowledge the first method which extends them to time-varying<br>geometric data. Our proposed algorithm is easy to implement,<br>preserves fine shape features and produces an accurate and<br>homogeneous smoothing result in the spatial and along the time<br>domains.}, BOOKTITLE = {SIGGRAPH '06: ACM SIGGRAPH 2006 Sketches}, EDITOR = {Pfister, Hanspeter}, PAGES = {56}, ADDRESS = {Boston, MA, USA}, }
Endnote
%0 Conference Proceedings %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature-preserving Denoising of Time-varying Range Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-22D1-6 %F EDOC: 314394 %F OTHER: Local-ID: C125675300671F7B-2170AA9825D9308AC1257186004AA06A-sig06sbs %R 10.1145/1179849.1179919 %D 2006 %B ACM SIGGRAPH 2006 %Z date of event: 2006-07-30 - 2006-08-03 %C Boston, MA, USA %X We present a technique for accurate denoising of time-varying<br>range data. It is inspired by the idea of similarity-based<br>non-local image filtering and spatio-temporal bilateral filtering<br>for video processing. We build upon both ideas and are to our<br>knowledge the first method which extends them to time-varying<br>geometric data. Our proposed algorithm is easy to implement,<br>preserves fine shape features and produces an accurate and<br>homogeneous smoothing result in the spatial and along the time<br>domains. %B SIGGRAPH '06: ACM SIGGRAPH 2006 Sketches %E Pfister, Hanspeter %P 56 %I ACM %@ 978-1-59593-364-5
Schall, O., Belyaev, A., and Seidel, H.-P. 2006b. Adaptive Fourier-based Surface Reconstruction. Geometric Modeling and Processing - GMP 2006, Springer.
Abstract
In this paper, we combine Kazhdan's FFT-based approach to surface reconstruction from oriented points with adaptive subdivision and partition of unity blending techniques. The advantages of our surface reconstruction method include a more robust surface restoration in regions where the surface bends close to itself and a lower memory consumption. The latter allows us to achieve a higher reconstruction accuracy than the original global approach. Furthermore, our reconstruction process is guided by a global error control achieved by computing the Hausdorff distance of selected input samples to intermediate reconstructions.
Export
BibTeX
@inproceedings{Schall-et-al_GMP06, TITLE = {Adaptive Fourier-based Surface Reconstruction}, AUTHOR = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-540-36711-X}, DOI = {10.1007/11802914_3}, LOCALID = {Local-ID: C125675300671F7B-5F4CB40EC76046B8C125718600389B53-gmp06sbs}, PUBLISHER = {Springer}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {In this paper, we combine Kazhdan's FFT-based approach to<br>surface reconstruction from oriented points with adaptive<br>subdivision and partition of unity blending techniques. The<br>advantages of our surface reconstruction method include a more<br>robust surface restoration in regions where the surface bends<br>close to itself and a lower memory consumption. The latter allows<br>us to achieve a higher reconstruction accuracy than the original<br>global approach. Furthermore, our reconstruction process is<br>guided by a global error control achieved by computing the<br>Hausdorff distance of selected input samples to intermediate<br>reconstructions.}, BOOKTITLE = {Geometric Modeling and Processing -- GMP 2006}, EDITOR = {Kim, Myung-Soo and Shimada, Kenji}, PAGES = {34--44}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {4077}, ADDRESS = {Pittsburgh, PA, USA}, }
Endnote
%0 Conference Proceedings %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive Fourier-based Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-21D7-6 %F EDOC: 314654 %F OTHER: Local-ID: C125675300671F7B-5F4CB40EC76046B8C125718600389B53-gmp06sbs %R 10.1007/11802914_3 %D 2006 %B 4th International Conference on Geometric Modeling and Processing %Z date of event: 2006-07-26 - 2006-07-28 %C Pittsburgh, PA, USA %X In this paper, we combine Kazhdan's FFT-based approach to<br>surface reconstruction from oriented points with adaptive<br>subdivision and partition of unity blending techniques. The<br>advantages of our surface reconstruction method include a more<br>robust surface restoration in regions where the surface bends<br>close to itself and a lower memory consumption. The latter allows<br>us to achieve a higher reconstruction accuracy than the original<br>global approach. Furthermore, our reconstruction process is<br>guided by a global error control achieved by computing the<br>Hausdorff distance of selected input samples to intermediate<br>reconstructions. %B Geometric Modeling and Processing - GMP 2006 %E Kim, Myung-Soo; Shimada, Kenji %P 34 - 44 %I Springer %@ 3-540-36711-X %B Lecture Notes in Computer Science %N 4077 %U https://rdcu.be/dHR42
Schall, O., Belyaev, A., and Seidel, H.-P. 2006c. Feature-preserving non-local denoising of static and time-varying range data. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present a novel algorithm for accurately denoising static and time-varying range data. Our approach is inspired by similarity-based non-local image filtering. We show that our proposed method is easy to implement and outperforms recent state-of-the-art filtering approaches. Furthermore, it preserves fine shape features and produces an accurate smoothing result in the spatial and along the time domain.
Export
BibTeX
@techreport{SchallBelyaevSeidel2006, TITLE = {Feature-preserving non-local denoising of static and time-varying range data}, AUTHOR = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-007}, NUMBER = {MPI-I-2006-4-007}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {We present a novel algorithm for accurately denoising static and time-varying range data. Our approach is inspired by similarity-based non-local image filtering. We show that our proposed method is easy to implement and outperforms recent state-of-the-art filtering approaches. Furthermore, it preserves fine shape features and produces an accurate smoothing result in the spatial and along the time domain.}, TYPE = {Research Report / Max-Planck-Institut f&#252;r Informatik}, }
Endnote
%0 Report %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature-preserving non-local denoising of static and time-varying range data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-673D-7 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-007 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 22 p. %X We present a novel algorithm for accurately denoising static and time-varying range data. Our approach is inspired by similarity-based non-local image filtering. We show that our proposed method is easy to implement and outperforms recent state-of-the-art filtering approaches. Furthermore, it preserves fine shape features and produces an accurate smoothing result in the spatial and along the time domain. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Sauber, N., Theisel, H., and Seidel, H.-P. 2006. Multifield-Graphs: An Approach to Visualizing Correlations in Multifield Scalar Data. IEEE Transactions on Visualization and Computer Graphics (Proc. VIS 2006), IEEE.
Abstract
We present an approach to visualizing correlations in 3D multifield scalar data. The core of our approach is the computation of correlation fields, which are scalar fields containing the local correlations of subsets of the multiple fields. While the visualization of the correlation fields can be done using standard 3D volume visualization techniques, their huge number makes selection and handling a challenge. We introduce the Multifield-Graph to give an overview of which multiple fields correlate and to show the strength of their correlation. This information guides the selection of informative correlation fields for visualization. We use our approach to visually analyze a number of real and synthetic multifield datasets.
Export
BibTeX
@inproceedings{Sauber-et-al_VIS06, TITLE = {Multifield-Graphs: An Approach to Visualizing Correlations in Multifield Scalar Data}, AUTHOR = {Sauber, Natascha and Theisel, Holger and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2006.165}, LOCALID = {Local-ID: C125675300671F7B-3E172BCA04A69760C12571B00064B001-sauber2005}, PUBLISHER = {IEEE}, PUBLISHER = {IEEE Computer Society}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {We present an approach to visualizing <br>correlations in 3D multifield scalar data.<br>The core of our approach is the computation of correlation fields, which are <br>scalar fields<br>containing the local correlations of<br>subsets of the multiple fields. <br>While the visualization of the correlation fields can be done using<br>standard 3D volume visualization techniques, their huge number makes selection <br>and<br>handling a challenge.<br>We introduce the Multifield-Graph to give an overview of which multiple fields<br>correlate and to show the strength of their correlation. This information<br>guides the selection of informative correlation fields for visualization. <br>We use our approach to visually analyze a number of real and <br>synthetic multifield datasets.}, BOOKTITLE = {IEEE Visualization Conference 2006}, EDITOR = {Gr{\"o}ller, Eduard and Pang, Alex and Silva, Claudio T. and Stasko, John and van Wijk, Jarke}, PAGES = {917--924}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. VIS)}, VOLUME = {12}, ISSUE = {5}, ADDRESS = {Baltimore, MD, USA}, }
Endnote
%0 Conference Proceedings %A Sauber, Natascha %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multifield-Graphs: An Approach to Visualizing Correlations in Multifield Scalar Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-237D-D %F EDOC: 314498 %F OTHER: Local-ID: C125675300671F7B-3E172BCA04A69760C12571B00064B001-sauber2005 %R 10.1109/TVCG.2006.165 %D 2006 %B 2006 IEEE Conference on Visualization %Z date of event: 2006-10-29 - 2006-11-03 %C Baltimore, MD, USA %X We present an approach to visualizing <br>correlations in 3D multifield scalar data.<br>The core of our approach is the computation of correlation fields, which are <br>scalar fields<br>containing the local correlations of<br>subsets of the multiple fields. <br>While the visualization of the correlation fields can be done using<br>standard 3D volume visualization techniques, their huge number makes selection <br>and<br>handling a challenge.<br>We introduce the Multifield-Graph to give an overview of which multiple fields<br>correlate and to show the strength of their correlation. This information<br>guides the selection of informative correlation fields for visualization. <br>We use our approach to visually analyze a number of real and <br>synthetic multifield datasets. %B IEEE Visualization Conference 2006 %E Gr&#246;ller, Eduard; Pang, Alex; Silva, Claudio T.; Stasko, John; van Wijk, Jarke %P 917 - 924 %I IEEE %J IEEE Transactions on Visualization and Computer Graphics %V 12 %N 5 %I IEEE Computer Society %@ false
Saleem, W., Wang, D., Belyaev, A., and Seidel, H.-P. 2006. Statistical Learning for Shape Applications. 1st International Symposium on Shapes and Semantics, CNR.
Abstract
Statistical methods are well suited to the large amounts of data typically involved in digital shape applications. In this paper, we look at two statistical learning methods related to digital shape processing. The first, neural meshes, learns the shape of a given point cloud — the surface reconstruction problem — in O(n²) time. We present an alternate implementation of the algorithm that takes O(n log n) time. Secondly, we present a simple method to automatically learn the correct orientation of a shape in an image from a database of images with correctly oriented shapes.
Export
BibTeX
@inproceedings{SaleemWBS_wss2006, TITLE = {Statistical Learning for Shape Applications}, AUTHOR = {Saleem, Waqar and Wang, Danyi and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-F199D73592F8196BC125719900561BF0-SaleemWBS_wss2006}, PUBLISHER = {CNR}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {Statistical methods are well suited to the large amounts of data typically involved in digital shape applications. In this paper, we look at two statistical learning methods related to digital shape processing. The first, \textit{neural meshes}, learns the shape of a given point cloud \--- the surface reconstruction problem \--- in $O(n^2)$ time. We present an alternate implementation of the algorithm that takes $O(n\log n)$ time. Secondly, we present a simple method to automatically learn the correct orientation of a shape in an image from a database of images with correctly oriented shapes.}, BOOKTITLE = {1st International Symposium on Shapes and Semantics}, PAGES = {53--60}, }
Endnote
%0 Conference Proceedings %A Saleem, Waqar %A Wang, Danyi %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Statistical Learning for Shape Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2402-6 %F EDOC: 314643 %F OTHER: Local-ID: C125675300671F7B-F199D73592F8196BC125719900561BF0-SaleemWBS_wss2006 %I CNR %D 2006 %B Untitled Event %Z date of event: 2006-06-17 - %C Matsushima, JAPAN %X Statistical methods are well suited to the large amounts of data typically involved in digital shape applications. In this paper, we look at two statistical learning methods related to digital shape processing. The first, \textit{neural meshes}, learns the shape of a given point cloud \--- the surface reconstruction problem \--- in $O(n^2)$ time. We present an alternate implementation of the algorithm that takes $O(n\log n)$ time. Secondly, we present a simple method to automatically learn the correct orientation of a shape in an image from a database of images with correctly oriented shapes. %B 1st International Symposium on Shapes and Semantics %P 53 - 60 %I CNR
Rosenhahn, B., Brox, T., Cremers, D., and Seidel, H.-P. 2006a. A Comparison of Shape Matching Methods for Contour Based Pose Estimation. Combinatorial Image Analysis ( IWCIA 2006), Springer.
Abstract
In this paper, we analyze two conceptually different approaches for shape matching: the well-known iterated closest point (ICP) algorithm and variational shape registration via level sets. For the latter, we suggest to use a numerical scheme which was introduced in the context of optic flow estimation. For the comparison, we focus on the application of shape matching in the context of pose estimation of 3-D objects by means of their silhouettes in stereo camera views. It turns out that both methods have their specific shortcomings. With the possibility of the pose estimation framework to combine correspondences from two different methods, we show that such a combination improves the stability and convergence behavior of the pose estimation algorithm. We gratefully acknowledge funding by the DFG project CR250/1 and the Max-Planck Center for visual computing and communication.
Export
BibTeX
@inproceedings{Rosenhahn-et-al_IWCIA06, TITLE = {A Comparison of Shape Matching Methods for Contour Based Pose Estimation}, AUTHOR = {Rosenhahn, Bodo and Brox, Thomas and Cremers, Daniel and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-540-35153-1}, DOI = {10.1007/11774938_21}, LOCALID = {Local-ID: C125675300671F7B-660A5F57E8AF1CABC125722D0051471C-RosenhahnIWCIA2006}, PUBLISHER = {Springer}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {In this paper, we analyze two conceptionally different approaches for shape <br>matching: the well-known iterated closest point (ICP) algorithm and variational <br>shape registration via level sets. For the latter, we suggest to use a <br>numerical scheme which was introduced in the context of optic flow estimation. <br>For the comparison, we focus on the application of shape matching in the <br>context of pose estimation of 3-D objects by means of their silhouettes in <br>stereo camera views. It turns out that both methods have their specific <br>shortcomings. With the possibility of the pose estimation framework to combine <br>correspondences from two different methods, we show that such a combination <br>improves the stability and convergence behavior of the pose estimation <br>algorithm. <br>We gratefully acknowledge funding by the DFG project CR250/1 and the Max-Planck <br>Center for visual computing and communication.}, BOOKTITLE = {Combinatorial Image Analysis ( IWCIA 2006)}, EDITOR = {Reulke, Ralf and Eckhardt, Ulrich and Flach, Boris and Knauer, Uwe and Polthier, Konrad}, PAGES = {263--276}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {4040}, ADDRESS = {Berlin, Germany}, }
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Brox, Thomas %A Cremers, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Comparison of Shape Matching Methods for Contour Based Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-21CB-2 %F EDOC: 314569 %F OTHER: Local-ID: C125675300671F7B-660A5F57E8AF1CABC125722D0051471C-RosenhahnIWCIA2006 %R 10.1007/11774938_21 %D 2006 %B 11th International Workshop on Combinatorial Image Analysis %Z date of event: 2006-06-19 - 2006-06-21 %C Berlin, Germany %X In this paper, we analyze two conceptionally different approaches for shape <br>matching: the well-known iterated closest point (ICP) algorithm and variational <br>shape registration via level sets. For the latter, we suggest to use a <br>numerical scheme which was introduced in the context of optic flow estimation. <br>For the comparison, we focus on the application of shape matching in the <br>context of pose estimation of 3-D objects by means of their silhouettes in <br>stereo camera views. It turns out that both methods have their specific <br>shortcomings. With the possibility of the pose estimation framework to combine <br>correspondences from two different methods, we show that such a combination <br>improves the stability and convergence behavior of the pose estimation <br>algorithm. <br>We gratefully acknowledge funding by the DFG project CR250/1 and the Max-Planck <br>Center for visual computing and communication. %B Combinatorial Image Analysis %E Reulke, Ralf; Eckhardt, Ulrich; Flach, Boris; Knauer, Uwe; Polthier, Konrad %P 263 - 276 %I Springer %@ 3-540-35153-1 %B Lecture Notes in Computer Science %N 4040 %U https://rdcu.be/dHMIa
Rosenhahn, B., Kersting, U., Powell, K., and Seidel, H.-P. 2006b. Cloth X-Ray: MoCap of People Wearing Textiles. Pattern Recognition (DAGM 2006), Springer.
Abstract
The contribution presents an approach for motion capturing (MoCap) of dressed people. A cloth draping method is embedded in a silhouette based MoCap system and an error functional is formalized to minimize image errors with respect to silhouettes, pose and kinematic chain parameters, the cloth draping components and external wind forces. We report on various experiments with two types of clothes, namely a skirt and a pair of shorts. Finally we compare the angles of the MoCap system with results from a commercially available marker based tracking system. The experiments show that we are basically within the error range of marker based tracking systems, though body parts are occluded with cloth.
Export
BibTeX
@inproceedings{Rosenhahn-et-al_DAGM06,
  title     = {Cloth X-Ray: {MoCap} of People Wearing Textiles},
  author    = {Rosenhahn, Bodo and Kersting, Uwe and Powell, Katie and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-3-540-44412-1},
  doi       = {10.1007/11861898_50},
  localid   = {Local-ID: C125675300671F7B-A11DB61ED0EAB612C125722D0050349F-RosenhahnDAGM2006a},
  publisher = {Springer},
  year      = {2006},
  date      = {2006},
  abstract  = {The contribution presents an approach for motion capturing (MoCap) of dressed <br>people.<br>A cloth draping method is embedded in a silhouette based MoCap system and an <br>error functional is formalized to<br>minimize image errors with respect to silhouettes, pose and kinematic chain <br>parameters, the cloth<br>draping components and external wind forces. We report on various experiments <br>with two types of clothes, namely<br>a skirt and a pair of shorts. Finally we compare the angles of the MoCap system <br>with<br>results from a commercially available marker based tracking system. The <br>experiments show, that we are<br>basically within the error range of marker based tracking systems, though body <br>parts are occluded with cloth.},
  booktitle = {Pattern Recognition (DAGM 2006)},
  editor    = {Franke, Katrin and M{\"u}ller, Klaus R. and Nickolay, Bertram and Sch{\"a}fer, Ralf},
  pages     = {495--504},
  series    = {Lecture Notes in Computer Science},
  volume    = {4174},
  address   = {Berlin, Germany},
}
Endnote
%0 Conference Proceedings %A Rosenhahn, Bodo %A Kersting, Uwe %A Powell, Katie %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Cloth X-Ray: MoCap of People Wearing Textiles : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-224F-C %F EDOC: 314483 %F OTHER: Local-ID: C125675300671F7B-A11DB61ED0EAB612C125722D0050349F-RosenhahnDAGM2006a %R 10.1007/11861898_50 %D 2006 %B 28th DAGM Symposium on Pattern Recognition %Z date of event: 2006-09-12 - 2006-09-14 %C Berlin, Germany %X The contribution presents an approach for motion capturing (MoCap) of dressed <br>people.<br>A cloth draping method is embedded in a silhouette based MoCap system and an <br>error functional is formalized to<br>minimize image errors with respect to silhouettes, pose and kinematic chain <br>parameters, the cloth<br>draping components and external wind forces. We report on various experiments <br>with two types of clothes, namely<br>a skirt and a pair of shorts. Finally we compare the angles of the MoCap system <br>with<br>results from a commercially available marker based tracking system. The <br>experiments show, that we are<br>basically within the error range of marker based tracking systems, though body <br>parts are occluded with cloth. %B Pattern Recognition %E Franke, Katrin; M&#252;ller, Klaus R.; Nickolay, Bertram; Sch&#228;fer, Ralf %P 495 - 504 %I Springer %@ 978-3-540-44412-1 %B Lecture Notes in Computer Science %N 4174 %U https://rdcu.be/dHS7T
Popov, S., Günther, J., Seidel, H.-P., and Slusallek, P. 2006. Experiences with Streaming Construction of SAH KD-Trees. Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing, IEEE.
Abstract
A major reason for the recent advancements in ray tracing performance is the use of optimized acceleration structures, namely kd-trees based on the surface area heuristic (SAH). Though algorithms exist to build these search trees in $O(n\log n)$, the construction times for larger scenes are still high and do not allow for rebuilding the kd-tree every frame to support dynamic changes. In this paper we propose modifications to previous kd-tree construction algorithms that significantly increase the coherence of memory accesses during construction of the kd-tree. Additionally we provide theoretical and practical results regarding \emph{conservatively} sub-sampling of the SAH cost function.
Export
BibTeX
@inproceedings{popov:06:ESC,
  title     = {Experiences with Streaming Construction of {SAH} {KD}-Trees},
  author    = {Popov, Stefan and G{\"u}nther, Johannes and Seidel, Hans-Peter and Slusallek, Philipp},
  editor    = {Wald, Ingo and Parker, Steven G.},
  language  = {eng},
  isbn      = {1-4244-0693-5},
  localid   = {Local-ID: C125675300671F7B-0A22F8966BD4C8ACC125722D0034EC0E-popov:06:ESC},
  publisher = {IEEE},
  year      = {2006},
  date      = {2006},
  abstract  = {A major reason for the recent advancements in ray tracing performance is the use of optimized acceleration structures, namely kd-trees based on the surface area heuristic (SAH). Though algorithms exist to build these search trees in $O(n\log n)$, the construction times for larger scenes are still high and do not allow for rebuilding the kd-tree every frame to support dynamic changes. In this paper we propose modifications to previous kd-tree construction algorithms that significantly increase the coherence of memory accesses during construction of the kd-tree. Additionally we provide theoretical and practical results regarding \emph{conservatively} sub-sampling of the SAH cost function.},
  booktitle = {Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing},
  pages     = {89--94},
}
Endnote
%0 Conference Proceedings %A Popov, Stefan %A G&#252;nther, Johannes %A Seidel, Hans-Peter %A Slusallek, Philipp %E Wald, Ingo %E Parker, Steven G. %+ International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Experiences with Streaming Construction of SAH KD-Trees : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-22C4-4 %F EDOC: 314627 %F OTHER: Local-ID: C125675300671F7B-0A22F8966BD4C8ACC125722D0034EC0E-popov:06:ESC %I IEEE %D 2006 %B 2006 IEEE Symposium on Interactive Ray Tracing %Z date of event: 2006-09-18 - 2006-09-20 %C Salt Lake City, USA %X A major reason for the recent advancements in ray tracing performance is the use of optimized acceleration structures, namely kd-trees based on the surface area heuristic (SAH). Though algorithms exist to build these search trees in $O(n\log n)$, the construction times for larger scenes are still high and do not allow for rebuilding the kd-tree every frame to support dynamic changes. In this paper we propose modifications to previous kd-tree construction algorithms that significantly increase the coherence of memory accesses during construction of the kd-tree. Additionally we provide theoretical and practical results regarding \emph{conservatively} sub-sampling of the SAH cost function. %B Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing %P 89 - 94 %I IEEE %@ 1-4244-0693-5
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2006a. A Composite Approach to Meshing Scattered Data. Graphical Models 68, 3.
Abstract
In this paper, we propose a new method for approximating an unorganized set of <br>points scattered over a piecewise smooth surface by a triangle mesh. The method <br>is based on the Garland-Heckbert local quadric error minimization strategy. <br>First an adaptive spherical cover and auxiliary points corresponding to the <br>cover elements are generated. Then the intersections between the spheres of the <br>cover are analyzed and the auxiliary points are connected. Finally the <br>resulting mesh is cleaned from nonmanifold parts. The method allows us to <br>control the approximation accuracy, process noisy data, and reconstruct sharp <br>edges and corners. Further, the vast majority of the triangles of the generated <br>mesh have their aspect ratios close to optimal. Thus our approach integrates <br>the mesh reconstruction, smoothing, decimation, feature restoration, and <br>remeshing stages together.
Export
BibTeX
@article{Ohtake-et-al_GM06a,
  title     = {A Composite Approach to Meshing Scattered Data},
  author    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1524-0703},
  doi       = {10.1016/j.gmod.2006.03.002},
  localid   = {Local-ID: C125675300671F7B-8D1740E2F40E9527C12572590043ADE1-Ohtake-gm06b},
  publisher = {Academic Press},
  address   = {San Diego, Calif.},
  year      = {2006},
  date      = {2006},
  abstract  = {In this paper, we propose a new method for approximating an unorganized set of <br>points scattered over a piecewise smooth surface by a triangle mesh. The method <br>is based on the Garland-Heckbert local quadric error minimization strategy. <br>First an adaptive spherical cover and auxiliary points corresponding to the <br>cover elements are generated. Then the intersections between the spheres of the <br>cover are analyzed and the auxiliary points are connected. Finally the <br>resulting mesh is cleaned from nonmanifold parts. The method allows us to <br>control the approximation accuracy, process noisy data, and reconstruct sharp <br>edges and corners. Further, the vast majority of the triangles of the generated <br>mesh have their aspect ratios close to optimal. Thus our approach integrates <br>the mesh reconstruction, smoothing, decimation, feature restoration, and <br>remeshing stages together.},
  journal   = {Graphical Models},
  volume    = {68},
  number    = {3},
  pages     = {255--267},
}
Endnote
%0 Journal Article %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Composite Approach to Meshing Scattered Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-21CE-B %F EDOC: 314395 %F OTHER: Local-ID: C125675300671F7B-8D1740E2F40E9527C12572590043ADE1-Ohtake-gm06b %R 10.1016/j.gmod.2006.03.002 %D 2006 %* Review method: peer-reviewed %X In this paper, we propose a new method for approximating an unorganized set of <br>points scattered over a piecewise smooth surface by a triangle mesh. The method <br>is based on the Garland-Heckbert local quadric error minimization strategy. <br>First an adaptive spherical cover and auxiliary points corresponding to the <br>cover elements are generated. Then the intersections between the spheres of the <br>cover are analyzed and the auxiliary points are connected. Finally the <br>resulting mesh is cleaned from nonmanifold parts. The method allows us to <br>control the approximation accuracy, process noisy data, and reconstruct sharp <br>edges and corners. Further, the vast majority of the triangles of the generated <br>mesh have their aspect ratios close to optimal. Thus our approach integrates <br>the mesh reconstruction, smoothing, decimation, feature restoration, and <br>remeshing stages together. %J Graphical Models %V 68 %N 3 %& 255 %P 255 - 267 %I Academic Press %C San Diego, Calif. %@ false
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2006b. Sparse Surface Reconstruction with Adaptive Partition of Unity and Radial Basis Functions. Graphical Models 68, 1.
Abstract
A new implicit surface fitting method for surface reconstruction <br>from scattered point data is proposed. The method combines an <br>adaptive partition of unity approximation with least-squares RBF <br>fitting and is capable of generating a high quality surface <br>reconstruction. Given a set of points scattered over a smooth surface, <br>first a sparse set of overlapped local approximations is constructed.<br>The partition of unity generated from these local<br>approximants already gives a faithful surface reconstruction.<br>The final reconstruction is obtained by adding compactly supported <br>RBFs. The main feature of the developed approach consists of <br>using various regularization schemes which lead to economical, <br>yet accurate surface reconstruction.
Export
BibTeX
@article{Ohtake-et-al_GM06b,
  title     = {Sparse Surface Reconstruction with Adaptive Partition of Unity and Radial Basis Functions},
  author    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1524-0703},
  doi       = {10.1016/j.gmod.2005.08.001},
  localid   = {Local-ID: C125675300671F7B-BD82841960CEFE13C12570F8004B68E9-Ohtake-gmod06a},
  publisher = {Academic Press},
  address   = {San Diego, Calif.},
  year      = {2006},
  date      = {2006},
  abstract  = {A new implicit surface fitting method for surface reconstruction <br>from scattered point data is proposed. The method combines an <br>adaptive partition of unity approximation with least-squares RBF <br>fitting and is capable of generating a high quality surface <br>reconstruction. Given a set of points scattered over a smooth surface, <br>first a sparse set of overlapped local approximations is constructed.<br>The partition of unity generated from these local<br>approximants already gives a faithful surface reconstruction.<br>The final reconstruction is obtained by adding compactly supported <br>RBFs. The main feature of the developed approach consists of <br>using various regularization schemes which lead to economical, <br>yet accurate surface reconstruction.},
  journal   = {Graphical Models},
  volume    = {68},
  number    = {1},
  pages     = {15--24},
}
Endnote
%0 Journal Article %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Sparse Surface Reconstruction with Adaptive Partition of Unity and Radial Basis Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23F6-A %F EDOC: 314459 %F OTHER: Local-ID: C125675300671F7B-BD82841960CEFE13C12570F8004B68E9-Ohtake-gmod06a %R 10.1016/j.gmod.2005.08.001 %D 2006 %* Review method: peer-reviewed %X A new implicit surface fitting method for surface reconstruction <br>from scattered point data is proposed. The method combines an <br>adaptive partition of unity approximation with least-squares RBF <br>fitting and is capable of generating a high quality surface <br>reconstruction. Given a set of points scattered over a smooth surface, <br>first a sparse set of overlapped local approximations is constructed.<br>The partition of unity generated from these local<br>approximants already gives a faithful surface reconstruction.<br>The final reconstruction is obtained by adding compactly supported <br>RBFs. The main feature of the developed approach consists of <br>using various regularization schemes which lead to economical, <br>yet accurate surface reconstruction. %J Graphical Models %V 68 %N 1 %& 15 %P 15 - 24 %I Academic Press %C San Diego, Calif. %@ false
Nishita, T., Peng, Q., and Seidel, H.-P., eds. 2006. Advances in Computer Graphics, 24th Computer Graphics International Conference, CGI 2006, Proceedings. Springer.
Export
BibTeX
@proceedings{DBLP:conf/cgi/2006,
  title     = {Advances in Computer Graphics, 24th Computer Graphics International Conference, CGI 2006, Proceedings},
  editor    = {Nishita, Tomoyuki and Peng, Qunsheng and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-540-35638-X},
  doi       = {10.1007/11784203},
  publisher = {Springer},
  year      = {2006},
  date      = {2006},
  address   = {Hangzhou, China},
}
Endnote
%0 Conference Proceedings %E Nishita, Tomoyuki %E Peng, Qunsheng %E Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Advances in Computer Graphics, 24th Computer Graphics International Conference, CGI 2006, Proceedings : %G eng %U http://hdl.handle.net/21.11116/0000-000F-43D7-3 %@ 3-540-35638-X %R 10.1007/11784203 %I Springer %D 2006 %B 24th Computer Graphics International Conference %Z date of event: 2005-06-26 - 2005-06-28 %D 2005 %C Hangzhou, China
Neff, M. and Seidel, H.-P. 2006. Modeling Relaxed Hand Shape for Character Animation. Articulated Motion and Deformable Objects (AMDO 2006), Springer.
Abstract
We present a technique for modeling the deformations that<br>occur to hand pose under the influence of gravity when the hand is kept in<br>a relaxed state. A dynamic model of the hand is built using Proportional-<br>Derivative controllers as a first order approximation to muscles. A process<br>for tuning the model to match the relaxed hand shape of subjects is discussed.<br>Once the model is tuned, it can be used to sample the space of<br>all possible arm orientations and samples of wrist and finger angles are<br>taken. From these samples, a kinematic model of passive hand deformation<br>is built. Either the tuned dynamic model or the kinematic model can<br>be used to generate final animations. These techniques increase the realism<br>of gesture animation, where the character often maintains a relaxed<br>hand.
Export
BibTeX
@inproceedings{Neff-Seidel_AMDO06,
  title     = {Modeling Relaxed Hand Shape for Character Animation},
  author    = {Neff, Michael and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-540-36031-X},
  doi       = {10.1007/11789239_27},
  localid   = {Local-ID: C125675300671F7B-4D05615CDBE13BBDC12571D90053CA49-Neff2006a},
  publisher = {Springer},
  year      = {2006},
  date      = {2006},
  abstract  = {We present a technique for modeling the deformations that<br>occur to hand pose under the influence of gravity when the hand is kept in<br>a relaxed state. A dynamic model of the hand is built using Proportional-<br>Derivative controllers as a first order approximation to muscles. A process<br>for tuning the model to match the relaxed hand shape of subjects is discussed.<br>Once the model is tuned, it can be used to sample the space of<br>all possible arm orientations and samples of wrist and finger angles are<br>taken. From these samples, a kinematic model of passive hand deformation<br>is built. Either the tuned dynamic model or the kinematic model can<br>be used to generate final animations. These techniques increase the realism<br>of gesture animation, where the character often maintains a relaxed<br>hand.},
  booktitle = {Articulated Motion and Deformable Objects (AMDO 2006)},
  editor    = {Perales, Francisco Jos{\'e} and Fisher, Robert B.},
  pages     = {262--270},
  series    = {Lecture Notes in Computer Science},
  volume    = {4069},
  address   = {Port d'Andratx, Spain},
}
Endnote
%0 Conference Proceedings %A Neff, Michael %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling Relaxed Hand Shape for Character Animation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2374-0 %F EDOC: 314507 %F OTHER: Local-ID: C125675300671F7B-4D05615CDBE13BBDC12571D90053CA49-Neff2006a %R 10.1007/11789239_27 %D 2006 %B 4th International Conference on Articulated Motion and Deformable Objects %Z date of event: 2006-07-11 - 2006-07-14 %C Port d'Andratx, Spain %X We present a technique for modeling the deformations that<br>occur to hand pose under the influence of gravity when the hand is kept in<br>a relaxed state. A dynamic model of the hand is built using Proportional-<br>Derivative controllers as a first order approximation to muscles. A process<br>for tuning the model to match the relaxed hand shape of subjects is discussed.<br>Once the model is tuned, it can be used to sample the space of<br>all possible arm orientations and samples of wrist and finger angles are<br>taken. From these samples, a kinematic model of passive hand deformation<br>is built. Either the tuned dynamic model or the kinematic model can<br>be used to generate final animations. These techniques increase the realism<br>of gesture animation, where the character often maintains a relaxed<br>hand. %B Articulated Motion and Deformable Objects %E Perales, Francisco Jos&#233;; Fisher, Robert B. %P 262 - 270 %I Springer %@ 3-540-36031-X %B Lecture Notes in Computer Science %N 4069 %U https://rdcu.be/dH0t9
Mantiuk, R., Efremov, A., Myszkowski, K., and Seidel, H.-P. 2006a. Backward Compatible High Dynamic Range MPEG Video Compression. ACM Transactions on Graphics, ACM.
Abstract
To embrace the imminent transition from traditional low-contrast<br> video (LDR) content to superior high dynamic range (HDR) content, we<br> propose a novel backward compatible HDR video compression (HDR~MPEG)<br> method. We introduce a compact reconstruction function that is used<br> to decompose an HDR video stream into a residual stream and a<br> standard LDR stream, which can be played on existing MPEG decoders,<br> such as DVD players. The reconstruction function is finely tuned to<br> the content of each HDR frame to achieve strong decorrelation<br> between the LDR and residual streams, which minimizes the amount of<br> redundant information. The size of the residual stream is further<br> reduced by removing invisible details prior to compression using our<br> HDR-enabled filter, which models luminance adaptation, contrast<br> sensitivity, and visual masking based on the HDR content. Designed<br> especially for DVD movie distribution, our HDR~MPEG compression<br> method features low storage requirements for HDR content resulting<br> in a 30\% size increase to an LDR video sequence. The proposed<br> compression method does not impose restrictions or modify the<br> appearance of the LDR or HDR video. This is important for backward<br> compatibility of the LDR stream with current DVD appearance, and<br> also enables independent fine tuning, tone mapping, and color<br> grading of both streams.
Export
BibTeX
@inproceedings{Mantiuk-et-al_SIGGRAPH06,
  title     = {Backward Compatible High Dynamic Range {MPEG} Video Compression},
  author    = {Mantiuk, Rafa{\l} and Efremov, Alexander and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1141911.1141946},
  localid   = {Local-ID: C125675300671F7B-1B2B94EF48903F44C1257149002EEC16-Mantiuk2006:hdrmpeg},
  publisher = {ACM},
  year      = {2006},
  date      = {2006},
  abstract  = {To embrace the imminent transition from traditional low-contrast<br> video (LDR) content to superior high dynamic range (HDR) content, we<br> propose a novel backward compatible HDR video compression (HDR~MPEG)<br> method. We introduce a compact reconstruction function that is used<br> to decompose an HDR video stream into a residual stream and a<br> standard LDR stream, which can be played on existing MPEG decoders,<br> such as DVD players. The reconstruction function is finely tuned to<br> the content of each HDR frame to achieve strong decorrelation<br> between the LDR and residual streams, which minimizes the amount of<br> redundant information. The size of the residual stream is further<br> reduced by removing invisible details prior to compression using our<br> HDR-enabled filter, which models luminance adaptation, contrast<br> sensitivity, and visual masking based on the HDR content. Designed<br> especially for DVD movie distribution, our HDR~MPEG compression<br> method features low storage requirements for HDR content resulting<br> in a 30\% size increase to an LDR video sequence. The proposed<br> compression method does not impose restrictions or modify the<br> appearance of the LDR or HDR video. This is important for backward<br> compatibility of the LDR stream with current DVD appearance, and<br> also enables independent fine tuning, tone mapping, and color<br> grading of both streams.},
  booktitle = {Proceedings of ACM SIGGRAPH 2006},
  editor    = {Dorsey, Julie},
  pages     = {713--723},
  journal   = {ACM Transactions on Graphics},
  volume    = {25},
  issue     = {3},
  address   = {Boston, MA, USA},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafa&#322; %A Efremov, Alexander %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Backward Compatible High Dynamic Range MPEG Video Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-223A-9 %F EDOC: 314605 %F OTHER: Local-ID: C125675300671F7B-1B2B94EF48903F44C1257149002EEC16-Mantiuk2006:hdrmpeg %R 10.1145/1141911.1141946 %D 2006 %B SIGGRAPH 2006: 33rd Annual Conference on Computer Graphics and Interactive Techniques %Z date of event: 2006-07-30 - 2006-08-03 %C Boston, MA, USA %X To embrace the imminent transition from traditional low-contrast<br> video (LDR) content to superior high dynamic range (HDR) content, we<br> propose a novel backward compatible HDR video compression (HDR~MPEG)<br> method. We introduce a compact reconstruction function that is used<br> to decompose an HDR video stream into a residual stream and a<br> standard LDR stream, which can be played on existing MPEG decoders,<br> such as DVD players. The reconstruction function is finely tuned to<br> the content of each HDR frame to achieve strong decorrelation<br> between the LDR and residual streams, which minimizes the amount of<br> redundant information. The size of the residual stream is further<br> reduced by removing invisible details prior to compression using our<br> HDR-enabled filter, which models luminance adaptation, contrast<br> sensitivity, and visual masking based on the HDR content. Designed<br> especially for DVD movie distribution, our HDR~MPEG compression<br> method features low storage requirements for HDR content resulting<br> in a 30\% size increase to an LDR video sequence. The proposed<br> compression method does not impose restrictions or modify the<br> appearance of the LDR or HDR video. 
This is important for backward<br> compatibility of the LDR stream with current DVD appearance, and<br> also enables independent fine tuning, tone mapping, and color<br> grading of both streams. %B Proceedings of ACM SIGGRAPH 2006 %E Dorsey, Julie %P 713 - 723 %I ACM %J ACM Transactions on Graphics %V 25 %N 3 %I Association for Computing Machinery %@ false
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006b. Lossy Compression of High Dynamic Range Images and Video. Human Vision and Electronic Imaging XI, SPIE.
Abstract
Most common image and video formats have been designed to work with<br> existing output devices, like LCD or CRT monitors. As display<br> technology makes progress, these formats no longer represent the<br> data that new devices can display. Therefore a shift towards higher<br> precision image and video formats is imminent.<br><br> To overcome limitations of common image and video formats, such as<br> JPEG, PNG or MPEG, we propose a novel color space, which can<br> accommodate an extended dynamic range and guarantees the precision<br> that is below the visibility threshold. The proposed color space,<br> which is derived from contrast detection data, can represent the<br> full range of luminance values and the complete color gamut that is<br> visible to the human eye. We show that only minor changes are<br> required to the existing encoding algorithms to accommodate the new<br> color space and therefore greatly enhance information content of the<br> visual data. We demonstrate this with two compression algorithms for<br> High Dynamic Range (HDR) visual data: for static images and for<br> video. We argue that the proposed HDR representation is a simple and<br> universal way to encode visual data independent of the display or<br> capture technology.
Export
BibTeX
@inproceedings{Mantiuk-et-al_SPIE06,
  title     = {Lossy Compression of High Dynamic Range Images and Video},
  author    = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1117/12.639140},
  localid   = {Local-ID: C125675300671F7B-313F8F727ABF44C0C125713800369E82-Mantiuk2005:LossyCompression},
  publisher = {SPIE},
  year      = {2006},
  date      = {2006},
  abstract  = {Most common image and video formats have been designed to work with<br> existing output devices, like LCD or CRT monitors. As display<br> technology makes progress, these formats no longer represent the<br> data that new devices can display. Therefore a shift towards higher<br> precision image and video formats is imminent.<br><br> To overcome limitations of common image and video formats, such as<br> JPEG, PNG or MPEG, we propose a novel color space, which can<br> accommodate an extended dynamic range and guarantees the precision<br> that is below the visibility threshold. The proposed color space,<br> which is derived from contrast detection data, can represent the<br> full range of luminance values and the complete color gamut that is<br> visible to the human eye. We show that only minor changes are<br> required to the existing encoding algorithms to accommodate the new<br> color space and therefore greatly enhance information content of the<br> visual data. We demonstrate this with two compression algorithms for<br> High Dynamic Range (HDR) visual data: for static images and for<br> video. We argue that the proposed HDR representation is a simple and<br> universal way to encode visual data independent of the display or<br> capture technology.},
  booktitle = {Human Vision and Electronic Imaging XI},
  editor    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.},
  series    = {SPIE},
  volume    = {6057},
  address   = {San Jose, USA},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lossy Compression of High Dynamic Range Images and Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-235C-8 %F EDOC: 314546 %F OTHER: Local-ID: C125675300671F7B-313F8F727ABF44C0C125713800369E82-Mantiuk2005:LossyCompression %R 10.1117/12.639140 %D 2006 %B Electronic Imaging 2006 %Z date of event: 2006-01-15 - 2006-01-19 %C San Jose, USA %X Most common image and video formats have been designed to work with<br> existing output devices, like LCD or CRT monitors. As display<br> technology makes progress, these formats no longer represent the<br> data that new devices can display. Therefore a shift towards higher<br> precision image and video formats is imminent.<br><br> To overcome limitations of common image and video formats, such as<br> JPEG, PNG or MPEG, we propose a novel color space, which can<br> accommodate an extended dynamic range and guarantees the precision<br> that is below the visibility threshold. The proposed color space,<br> which is derived from contrast detection data, can represent the<br> full range of luminance values and the complete color gamut that is<br> visible to the human eye. We show that only minor changes are<br> required to the existing encoding algorithms to accommodate the new<br> color space and therefore greatly enhance information content of the<br> visual data. We demonstrate this with two compression algorithms for<br> High Dynamic Range (HDR) visual data: for static images and for<br> video. We argue that the proposed HDR representation is a simple and<br> universal way to encode visual data independent of the display or<br> capture technology. %B Human Vision and Electronic Imaging XI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %I SPIE %B SPIE %N 6057
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006c. A Perceptual Framework for Contrast Processing of High Dynamic Range Images. ACM Transactions on Applied Perception 3, 3.
Abstract
Image processing often involves an image transformation into a<br> domain that is better correlated with visual perception, such as the<br> wavelet domain, image pyramids, multi-scale contrast<br> representations, contrast in retinex algorithms, and chroma,<br> lightness and colorfulness predictors in color appearance models.<br> Many of these transformations are not ideally suited for image<br> processing that significantly modifies an image. For example, the<br> modification of a single band in a multi-scale model leads to an<br> unrealistic image with severe halo artifacts. Inspired by gradient<br> domain methods we derive a framework that imposes constraints on the<br> entire set of contrasts in an image for a full range of spatial<br> frequencies. This way, even severe image modifications do not<br> reverse the polarity of contrast. The strengths of the framework are<br> demonstrated by aggressive contrast enhancement and a visually<br> appealing tone mapping which does not introduce artifacts.<br> Additionally, we perceptually linearize contrast magnitudes using a<br> custom transducer function. The transducer function has been derived<br> especially for the purpose of HDR images, based on the contrast<br> discrimination measurements for high contrast stimuli.
Export
BibTeX
@article{Mantiuk-et-al_TAP06,
  title     = {A Perceptual Framework for Contrast Processing of High Dynamic Range Images},
  author    = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1544-3558},
  doi       = {10.1145/1166087.1166095},
  localid   = {Local-ID: C125675300671F7B-43FC98F7A2FC192EC1257149002E3B9A-Mantiuk2006:ContrastDomain},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2006},
  date      = {2006},
  abstract  = {Image processing often involves an image transformation into a<br> domain that is better correlated with visual perception, such as the<br> wavelet domain, image pyramids, multi-scale contrast<br> representations, contrast in retinex algorithms, and chroma,<br> lightness and colorfulness predictors in color appearance models.<br> Many of these transformations are not ideally suited for image<br> processing that significantly modifies an image. For example, the<br> modification of a single band in a multi-scale model leads to an<br> unrealistic image with severe halo artifacts. Inspired by gradient<br> domain methods we derive a framework that imposes constraints on the<br> entire set of contrasts in an image for a full range of spatial<br> frequencies. This way, even severe image modifications do not<br> reverse the polarity of contrast. The strengths of the framework are<br> demonstrated by aggressive contrast enhancement and a visually<br> appealing tone mapping which does not introduce artifacts.<br> Additionally, we perceptually linearize contrast magnitudes using a<br> custom transducer function. The transducer function has been derived<br> especially for the purpose of HDR images, based on the contrast<br> discrimination measurements for high contrast stimuli.},
  journal   = {ACM Transactions on Applied Perception},
  volume    = {3},
  number    = {3},
  pages     = {286--308},
}
Endnote
%0 Journal Article %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Framework for Contrast Processing of High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2214-E %F EDOC: 314382 %F OTHER: Local-ID: C125675300671F7B-43FC98F7A2FC192EC1257149002E3B9A-Mantiuk2006:ContrastDomain %R 10.1145/1166087.1166095 %D 2006 %* Review method: peer-reviewed %X Image processing often involves an image transformation into a<br> domain that is better correlated with visual perception, such as the<br> wavelet domain, image pyramids, multi-scale contrast<br> representations, contrast in retinex algorithms, and chroma,<br> lightness and colorfulness predictors in color appearance models.<br> Many of these transformations are not ideally suited for image<br> processing that significantly modifies an image. For example, the<br> modification of a single band in a multi-scale model leads to an<br> unrealistic image with severe halo artifacts. Inspired by gradient<br> domain methods we derive a framework that imposes constraints on the<br> entire set of contrasts in an image for a full range of spatial<br> frequencies. This way, even severe image modifications do not<br> reverse the polarity of contrast. The strengths of the framework are<br> demonstrated by aggressive contrast enhancement and a visually<br> appealing tone mapping which does not introduce artifacts.<br> Additionally, we perceptually linearize contrast magnitudes using a<br> custom transducer function. The transducer function has been derived<br> especially for the purpose of HDR images, based on the contrast<br> discrimination measurements for high contrast stimuli. %J ACM Transactions on Applied Perception %V 3 %N 3 %& 286 %P 286 - 308 %I Association for Computing Machinery %C New York, NY %@ false
Lee, Y., Lee, S., Ivrissimtzis, I., and Seidel, H.-P. 2006. Overfitting Control for Surface Reconstruction. SGP 2006 : Fourth Eurographics Symposium on Geometry Processing, Eurographics.
Abstract
This paper proposes a general framework for overfitting control in surface <br>reconstruction from noisy point data. The problem we deal with is how to create <br>a model that will capture as much detail as possible and simultaneously avoid <br>reproducing the noise of the input points. The proposed framework is based on <br>extra-sample validation. It is fully automatic and can work in conjunction with <br>any surface reconstruction algorithm. We test the framework with a Radial Basis <br>Function algorithm, Multi-level Partition of Unity implicits, and the Power <br>Crust algorithm.
Export
BibTeX
@inproceedings{Lee-et-al_SGP06,
  title     = {Overfitting Control for Surface Reconstruction},
  author    = {Lee, Yunjin and Lee, Seungyong and Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-905673-36-3},
  doi       = {10.2312/SGP/SGP06/231-234},
  localid   = {Local-ID: C125675300671F7B-6200B7539E4BFA32C125729900502A05-Lee-sgp06},
  publisher = {Eurographics},
  year      = {2006},
  date      = {2006},
  abstract  = {This paper proposes a general framework for overfitting control in surface <br>reconstruction from noisy point data. The problem we deal with is how to create <br>a model that will capture as much detail as possible and simultaneously avoid <br>reproducing the noise of the input points. The proposed framework is based on <br>extra-sample validation. It is fully automatic and can work in conjunction with <br>any surface reconstruction algorithm. We test the framework with a Radial Basis <br>Function algorithm, Multi-level Partition of Unity implicits, and the Power <br>Crust algorithm.},
  booktitle = {SGP 2006~:~Fourth Eurographics Symposium on Geometry Processing},
  editor    = {Fellner, Dieter W. and Spencer, Stephen N. and Sheffer, Alla and Polthier, Konrad},
  pages     = {231--234},
  address   = {Cagliari, Sardinia, Italy},
}
Endnote
%0 Conference Proceedings %A Lee, Yunjin %A Lee, Seungyong %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Overfitting Control for Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23AD-1 %F EDOC: 314437 %F OTHER: Local-ID: C125675300671F7B-6200B7539E4BFA32C125729900502A05-Lee-sgp06 %R 10.2312/SGP/SGP06/231-234 %D 2006 %B Fourth Eurographics Symposium on Geometry Processing %Z date of event: 2006-06-26 - 2006-06-28 %C Cagliari, Sardinia, Italy %X This paper proposes a general framework for overfitting control in surface <br>reconstruction from noisy point data. The problem we deal with is how to create <br>a model that will capture as much detail as possible and simultaneously avoid <br>reproducing the noise of the input points. The proposed framework is based on <br>extra-sample validation. It is fully automatic and can work in conjunction with <br>any surface reconstruction algorithm. We test the framework with a Radial Basis <br>Function algorithm, Multi-level Partition of Unity implicits, and the Power <br>Crust algorithm. %B SGP 2006&#160;:&#160;Fourth Eurographics Symposium on Geometry Processing %E Fellner, Dieter W.; Spencer, Stephen N.; Sheffer, Alla; Polthier, Konrad %P 231 - 234 %I Eurographics %@ 3-905673-36-3
Lau, R.W.H. and Seidel, H.-P. 2006. Guest Editors’ Introduction: Special Section on ACM VRST. IEEE Transactions on Visualization and Computer Graphics 12, 2.
Export
BibTeX
@article{DBLP:journals/tvcg/LauS06,
  title     = {Guest Editors' Introduction: Special Section on {ACM} {VRST}},
  author    = {Lau, Rynson W. H. and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2006.31},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2006},
  date      = {2006},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {12},
  number    = {2},
  pages     = {129--130},
}
Endnote
%0 Journal Article %A Lau, Rynson W. H. %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Guest Editors' Introduction: Special Section on ACM VRST : %G eng %U http://hdl.handle.net/21.11116/0000-000F-4B3D-A %R 10.1109/TVCG.2006.31 %D 2006 %J IEEE Transactions on Visualization and Computer Graphics %V 12 %N 2 %& 129 %P 129 - 130 %I IEEE Computer Society %C New York, NY %@ false
Langer, T., Belyaev, A., and Seidel, H.-P. 2006. Spherical Barycentric Coordinates. SGP 2006 : Fourth Eurographics Symposium on Geometry Processing, Eurographics.
Abstract
We develop spherical barycentric coordinates. Analogous to classical, <br>planar barycentric coordinates that describe the positions of points in a plane <br>with respect to <br>the vertices of a given planar polygon, spherical barycentric coordinates <br>describe the positions<br>of points on a sphere with respect to the vertices of a given spherical <br>polygon. <br>In particular, we introduce spherical mean value coordinates that inherit many <br>good properties of their planar counterparts.<br>Furthermore, we present a construction that gives a simple and intuitive <br>geometric interpretation for <br>classical barycentric coordinates, like Wachspress coordinates, mean value <br>coordinates, and discrete <br>harmonic coordinates.<br><br>One of the most interesting consequences is the possibility to <br>construct mean value coordinates for arbitrary polygonal meshes. <br>So far, this was only possible for triangular meshes. Furthermore, spherical <br>barycentric coordinates<br>can be used for all applications where only planar barycentric coordinates were <br>available up to now.<br>They include B\'ezier surfaces, parameterization, free-form deformations, and <br>interpolation of rotations.
Export
BibTeX
@inproceedings{Langer-et-al_SGP06,
  title     = {Spherical Barycentric Coordinates},
  author    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-905673-36-3},
  doi       = {10.2312/SGP/SGP06/081-088},
  localid   = {Local-ID: C125675300671F7B-9144C5FF262D3F9CC12571BE00348DDF-LangerSGP06},
  publisher = {Eurographics},
  year      = {2006},
  date      = {2006},
  abstract  = {We develop spherical barycentric coordinates. Analogous to classical, <br>planar barycentric coordinates that describe the positions of points in a plane <br>with respect to <br>the vertices of a given planar polygon, spherical barycentric coordinates <br>describe the positions<br>of points on a sphere with respect to the vertices of a given spherical <br>polygon. <br>In particular, we introduce spherical mean value coordinates that inherit many <br>good properties of their planar counterparts.<br>Furthermore, we present a construction that gives a simple and intuitive <br>geometric interpretation for <br>classical barycentric coordinates, like Wachspress coordinates, mean value <br>coordinates, and discrete <br>harmonic coordinates.<br><br>One of the most interesting consequences is the possibility to <br>construct mean value coordinates for arbitrary polygonal meshes. <br>So far, this was only possible for triangular meshes. Furthermore, spherical <br>barycentric coordinates<br>can be used for all applications where only planar barycentric coordinates were <br>available up to now.<br>They include B{\'e}zier surfaces, parameterization, free-form deformations, and <br>interpolation of rotations.},
  booktitle = {SGP 2006~:~Fourth Eurographics Symposium on Geometry Processing},
  editor    = {Fellner, Dieter W. and Spencer, Stephen N. and Sheffer, Alla and Polthier, Konrad},
  pages     = {81--88},
  address   = {Cagliari, Sardinia, Italy},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spherical Barycentric Coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23FD-B %F EDOC: 314588 %F OTHER: Local-ID: C125675300671F7B-9144C5FF262D3F9CC12571BE00348DDF-LangerSGP06 %R 10.2312/SGP/SGP06/081-088 %D 2006 %B Fourth Eurographics Symposium on Geometry Processing %Z date of event: 2006-06-26 - 2006-06-28 %C Cagliari, Sardinia, Italy %X We develop spherical barycentric coordinates. Analogous to classical, <br>planar barycentric coordinates that describe the positions of points in a plane <br>with respect to <br>the vertices of a given planar polygon, spherical barycentric coordinates <br>describe the positions<br>of points on a sphere with respect to the vertices of a given spherical <br>polygon. <br>In particular, we introduce spherical mean value coordinates that inherit many <br>good properties of their planar counterparts.<br>Furthermore, we present a construction that gives a simple and intuitive <br>geometric interpretation for <br>classical barycentric coordinates, like Wachspress coordinates, mean value <br>coordinates, and discrete <br>harmonic coordinates.<br><br>One of the most interesting consequences is the possibility to <br>construct mean value coordinates for arbitrary polygonal meshes. <br>So far, this was only possible for triangular meshes. Furthermore, spherical <br>barycentric coordinates<br>can be used for all applications where only planar barycentric coordinates were <br>available up to now.<br>They include B\'ezier surfaces, parameterization, free-form deformations, and <br>interpolation of rotations. %B SGP 2006&#160;:&#160;Fourth Eurographics Symposium on Geometry Processing %E Fellner, Dieter W.; Spencer, Stephen N.; Sheffer, Alla; Polthier, Konrad %P 81 - 88 %I Eurographics %@ 3-905673-36-3
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2006. Computational Model of Lightness Perception in High Dynamic Range Imaging. Human Vision and Electronic Imaging XI, SPIE.
Abstract
An anchoring theory of lightness perception by Gilchrist et al. [1999] explains <br>many characteristics of human visual system such as lightness constancy and its <br>spectacular failures which are important in the perception of images. The <br>principal concept of this theory is the perception of complex scenes in terms <br>of groups of consistent areas (frameworks). Such areas, following the gestalt <br>theorists, are defined by the regions of common illumination. The key aspect of <br>the image perception is the estimation of lightness within each framework <br>through the anchoring to the luminance perceived as white, followed by the <br>computation of the global lightness. In this paper we provide a computational <br>model for automatic decomposition of HDR images into frameworks. We derive a <br>tone mapping operator which predicts lightness perception of the real world <br>scenes and aims at its accurate reproduction on low dynamic range displays. <br>Furthermore, such a decomposition into frameworks opens new grounds for local <br>image analysis in view of human perception.
Export
BibTeX
@inproceedings{Krawczyk-et-al_ASEI06,
  title     = {Computational Model of Lightness Perception in High Dynamic Range Imaging},
  author    = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0277-786X},
  doi       = {10.1117/12.639266},
  localid   = {Local-ID: C125675300671F7B-E9AB6DE505E34EABC1257149002AB5F8-Krawczyk2006spie},
  publisher = {SPIE},
  year      = {2006},
  date      = {2006},
  abstract  = {An anchoring theory of lightness perception by Gilchrist et al. [1999] explains <br>many characteristics of human visual system such as lightness constancy and its <br>spectacular failures which are important in the perception of images. The <br>principal concept of this theory is the perception of complex scenes in terms <br>of groups of consistent areas (frameworks). Such areas, following the gestalt <br>theorists, are defined by the regions of common illumination. The key aspect of <br>the image perception is the estimation of lightness within each framework <br>through the anchoring to the luminance perceived as white, followed by the <br>computation of the global lightness. In this paper we provide a computational <br>model for automatic decomposition of HDR images into frameworks. We derive a <br>tone mapping operator which predicts lightness perception of the real world <br>scenes and aims at its accurate reproduction on low dynamic range displays. <br>Furthermore, such a decomposition into frameworks opens new grounds for local <br>image analysis in view of human perception.},
  booktitle = {Human Vision and Electronic Imaging XI},
  editor    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.},
  pages     = {1--12},
  series    = {SPIE},
  volume    = {6057},
  address   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computational Model of Lightness Perception in High Dynamic Range Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2258-5 %F EDOC: 314537 %F OTHER: Local-ID: C125675300671F7B-E9AB6DE505E34EABC1257149002AB5F8-Krawczyk2006spie %R 10.1117/12.639266 %D 2006 %B IS&T/SPIE's 18th Annual Symposium on Electronic Imaging %Z date of event: 2006-01-15 - 2006-01-19 %C San Jose, CA, USA %X An anchoring theory of lightness perception by Gilchrist et al. [1999] explains <br>many characteristics of human visual system such as lightness constancy and its <br>spectacular failures which are important in the perception of images. The <br>principal concept of this theory is the perception of complex scenes in terms <br>of groups of consistent areas (frameworks). Such areas, following the gestalt <br>theorists, are defined by the regions of common illumination. The key aspect of <br>the image perception is the estimation of lightness within each framework <br>through the anchoring to the luminance perceived as white, followed by the <br>computation of the global lightness. In this paper we provide a computational <br>model for automatic decomposition of HDR images into frameworks. We derive a <br>tone mapping operator which predicts lightness perception of the real world <br>scenes and aims at its accurate reproduction on low dynamic range displays. <br>Furthermore, such a decomposition into frameworks opens new grounds for local <br>image analysis in view of human perception. %B Human Vision and Electronic Imaging XI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 1 - 12 %I SPIE %B SPIE %N 6057 %@ false
Havran, V., Herzog, R., and Seidel, H.-P. 2006a. On the Fast Construction of Spatial Hierarchies for Ray Tracing. Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing, IEEE.
Abstract
In this paper we address the problem of fast construction of spatial hierarchies for ray tracing with applications in animated environments including non-rigid animations. We discuss properties of currently used techniques with $O(N \log N)$ construction time for kd-trees and bounding volume hierarchies. Further, we will propose a hybrid data structure blending a spatial kd-tree with bounding volume primitives. We will keep our novel hierarchical data structures algorithmically efficient and comparable with kd-trees by using a cost model based on surface area heuristics. Although the time complexity $O(N \log N)$ is a lower bound required for construction of any spatial hierarchy, which corresponds to sorting based on comparisons, using an approximate method based on space discretization, we propose a new hierarchical data structures with expected $O(N \log\log N)$ time complexity. We also discuss the constants behind the construction algorithms of spatial hierarchies that are important in practice. We document the performance of our algorithms by results obtained from nine different scenes.
Export
BibTeX
@inproceedings{HavranRT2006,
  title     = {On the Fast Construction of Spatial Hierarchies for Ray Tracing},
  author    = {Havran, Vlastimil and Herzog, Robert and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {1-4244-0693-5},
  localid   = {Local-ID: C125675300671F7B-94561A8D2A5BD650C125722F005A0243-HavranRT2006},
  publisher = {IEEE},
  year      = {2006},
  date      = {2006},
  abstract  = {In this paper we address the problem of fast construction of spatial hierarchies for ray tracing with applications in animated environments including non-rigid animations. We discuss properties of currently used techniques with $O(N \log N)$ construction time for kd-trees and bounding volume hierarchies. Further, we will propose a hybrid data structure blending a spatial kd-tree with bounding volume primitives. We will keep our novel hierarchical data structures algorithmically efficient and comparable with kd-trees by using a cost model based on surface area heuristics. Although the time complexity $O(N \log N)$ is a lower bound required for construction of any spatial hierarchy, which corresponds to sorting based on comparisons, using an approximate method based on space discretization, we propose a new hierarchical data structures with expected $O(N \log\log N)$ time complexity. We also discuss the constants behind the construction algorithms of spatial hierarchies that are important in practice. We document the performance of our algorithms by results obtained from nine different scenes.},
  booktitle = {Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing},
  editor    = {Wald, Ingo and Parker, Steven G.},
  pages     = {71--80},
  address   = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Herzog, Robert %A Seidel, Hans-Peter %E Wald, Ingo %E Parker, Steven G. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On the Fast Construction of Spatial Hierarchies for Ray Tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2397-2 %F EDOC: 314634 %F OTHER: Local-ID: C125675300671F7B-94561A8D2A5BD650C125722F005A0243-HavranRT2006 %I IEEE %D 2006 %B Untitled Event %Z date of event: 2006-09-18 - %C Salt Lake City, UT, USA %X In this paper we address the problem of fast construction of spatial hierarchies for ray tracing with applications in animated environments including non-rigid animations. We discuss properties of currently used techniques with $O(N \log N)$ construction time for kd-trees and bounding volume hierarchies. Further, we will propose a hybrid data structure blending a spatial kd-tree with bounding volume primitives. We will keep our novel hierarchical data structures algorithmically efficient and comparable with kd-trees by using a cost model based on surface area heuristics. Although the time complexity $O(N \log N)$ is a lower bound required for construction of any spatial hierarchy, which corresponds to sorting based on comparisons, using an approximate method based on space discretization, we propose a new hierarchical data structures with expected $O(N \log\log N)$ time complexity. We also discuss the constants behind the construction algorithms of spatial hierarchies that are important in practice. We document the performance of our algorithms by results obtained from nine different scenes. %B Proceedings of the 2006 IEEE Symposium on Interactive Ray Tracing %P 71 - 80 %I IEEE %@ 1-4244-0693-5
Havran, V., Herzog, R., and Seidel, H.-P. 2006b. On fast construction of spatial hierarchies for ray tracing. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this paper we address the problem of fast construction of spatial hierarchies for ray tracing with applications in animated environments including non-rigid animations. We discuss properties of currently used techniques with $O(N \log N)$ construction time for kd-trees and bounding volume hierarchies. Further, we propose a hybrid data structure blending between a spatial kd-tree and bounding volume primitives. We keep our novel hierarchical data structures algorithmically efficient and comparable with kd-trees by the use of a cost model based on surface area heuristics. Although the time complexity $O(N \log N)$ is a lower bound required for construction of any spatial hierarchy that corresponds to sorting based on comparisons, using approximate method based on discretization we propose a new hierarchical data structures with expected $O(N \log\log N)$ time complexity. We also discuss constants behind the construction algorithms of spatial hierarchies that are important in practice. We document the performance of our algorithms by results obtained from the implementation tested on nine different scenes.
Export
BibTeX
@techreport{HavranHerzogSeidel2006,
  title       = {On fast construction of spatial hierarchies for ray tracing},
  author      = {Havran, Vlastimil and Herzog, Robert and Seidel, Hans-Peter},
  language    = {eng},
  url         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-004},
  number      = {MPI-I-2006-4-004},
  institution = {Max-Planck-Institut f{\"u}r Informatik},
  address     = {Saarbr{\"u}cken},
  year        = {2006},
  date        = {2006},
  abstract    = {In this paper we address the problem of fast construction of spatial hierarchies for ray tracing with applications in animated environments including non-rigid animations. We discuss properties of currently used techniques with $O(N \log N)$ construction time for kd-trees and bounding volume hierarchies. Further, we propose a hybrid data structure blending between a spatial kd-tree and bounding volume primitives. We keep our novel hierarchical data structures algorithmically efficient and comparable with kd-trees by the use of a cost model based on surface area heuristics. Although the time complexity $O(N \log N)$ is a lower bound required for construction of any spatial hierarchy that corresponds to sorting based on comparisons, using approximate method based on discretization we propose a new hierarchical data structures with expected $O(N \log\log N)$ time complexity. We also discuss constants behind the construction algorithms of spatial hierarchies that are important in practice. We document the performance of our algorithms by results obtained from the implementation tested on nine different scenes.},
  type        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Havran, Vlastimil %A Herzog, Robert %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On fast construction of spatial hierarchies for ray tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6807-8 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-004 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 40 p. %X In this paper we address the problem of fast construction of spatial hierarchies for ray tracing with applications in animated environments including non-rigid animations. We discuss properties of currently used techniques with $O(N \log N)$ construction time for kd-trees and bounding volume hierarchies. Further, we propose a hybrid data structure blending between a spatial kd-tree and bounding volume primitives. We keep our novel hierarchical data structures algorithmically efficient and comparable with kd-trees by the use of a cost model based on surface area heuristics. Although the time complexity $O(N \log N)$ is a lower bound required for construction of any spatial hierarchy that corresponds to sorting based on comparisons, using approximate method based on discretization we propose a new hierarchical data structures with expected $O(N \log\log N)$ time complexity. We also discuss constants behind the construction algorithms of spatial hierarchies that are important in practice. We document the performance of our algorithms by results obtained from the implementation tested on nine different scenes. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Hasler, N., Asbach, M., Rosenhahn, B., Ohm, J.-R., and Seidel, H.-P. 2006. Physically Based Tracking of Cloth. 11th International Fall Workshop on Vision, Modeling, and Visualization 2006 ({VMV} 2006), IOS.
Abstract
In this work a method for tracking fabrics in videos is proposed which, unlike most other cloth tracking algorithms, employs an analysis-by-synthesis approach. That is tracking consists of optimising a set of parameters of a mass-sp ring model that is used to simulate the textile, defining on the one hand the fabric properties and on the other the positions of a limited number of constrained points of the simulated cloth. To improve the tracking accuracy and to overcome the inherently chaotic behaviour of the real fabric several methods to track features on the cloth's surfa ce and the best way to influence the simulation are evaluated.
Export
BibTeX
@inproceedings{HasAsbRosOhmSei06,
  TITLE     = {Physically Based Tracking of Cloth},
  AUTHOR    = {Hasler, Nils and Asbach, Mark and Rosenhahn, Bodo and Ohm, Jens-Rainer and Seidel, Hans-Peter},
  EDITOR    = {Kobbelt, Leif and Kuhlen, Torsten and Aach, Til and Westermann, R{\"u}diger},
  LANGUAGE  = {eng},
  ISBN      = {1-58603-688-2},
  LOCALID   = {Local-ID: C125675300671F7B-7E515B5361C262CBC12572340077BEBC-HasAsbRosOhmSei06},
  PUBLISHER = {IOS},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {In this work a method for tracking fabrics in videos is proposed which, unlike most other cloth tracking algorithms, employs an analysis-by-synthesis approach. That is tracking consists of optimising a set of parameters of a mass-spring model that is used to simulate the textile, defining on the one hand the fabric properties and on the other the positions of a limited number of constrained points of the simulated cloth. To improve the tracking accuracy and to overcome the inherently chaotic behaviour of the real fabric several methods to track features on the cloth's surface and the best way to influence the simulation are evaluated.},
  BOOKTITLE = {11th International Fall Workshop on Vision, Modeling, and Visualization 2006 ({VMV} 2006)},
  PAGES     = {49--56},
  ADDRESS   = {Aachen, Germany},
}
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Asbach, Mark %A Rosenhahn, Bodo %A Ohm, Jens-Rainer %A Seidel, Hans-Peter %E Kobbelt, Leif %E Kuhlen, Torsten %E Aach, Til %E Westermann, R&#252;diger %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Physically Based Tracking of Cloth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23B8-7 %F EDOC: 314481 %F OTHER: Local-ID: C125675300671F7B-7E515B5361C262CBC12572340077BEBC-HasAsbRosOhmSei06 %I IOS %D 2006 %B Untitled Event %Z date of event: 2006-11-22 - %C Aachen, Germany %X In this work a method for tracking fabrics in videos is proposed which, unlike most other cloth tracking algorithms, employs an analysis-by-synthesis approach. That is tracking consists of optimising a set of parameters of a mass-sp ring model that is used to simulate the textile, defining on the one hand the fabric properties and on the other the positions of a limited number of constrained points of the simulated cloth. To improve the tracking accuracy and to overcome the inherently chaotic behaviour of the real fabric several methods to track features on the cloth's surfa ce and the best way to influence the simulation are evaluated. %B 11th International Fall Workshop on Vision, Modeling, and Visualization 2006 ({VMV} 2006) %P 49 - 56 %I IOS %@ 1-58603-688-2
Günther, J., Friedrich, H., Seidel, H.-P., and Slusallek, P. 2006a. Interactive Ray Tracing of Skinned Animations. The Visual Computer22.
Abstract
Recent high-performance ray tracing implementations have<br>already achieved interactive performance on a single PC even<br>for highly complex scenes. However, so far these approaches<br>have been limited to mostly static scenes due to the large<br>cost of updating the necessary spatial index structures<br>after modifying scene geometry. In this paper we present an<br>approach that avoids these updates almost completely for the<br>case of skinned models as typically used in computer games.<br>We assume that the characters are built from meshes with an<br>underlying skeleton structure, where the set of joint angles<br>defines the character's pose and determines the skinning<br>parameters. Based on a sampling of the possible pose space<br>we build a static fuzzy kd-tree for each skeleton segment in<br>a fast preprocessing step. This fuzzy kd-trees are then<br>organized in a top-level kd-tree. Together with the<br>skeleton's affine transformations this multi-level kd-tree<br>allows for fast and efficient scene traversal at runtime<br>while arbitrary combinations of animation sequences can be<br>applied interactively to the joint angles. We achieve<br>real-time ray tracing performance of up to 15 frames per<br>second at $1024 \times 1024$ resolution even on a single<br>processor core.
Export
BibTeX
@article{Gunther-et-al_Vis.Comp.06,
  TITLE     = {Interactive Ray Tracing of Skinned Animations},
  AUTHOR    = {G{\"u}nther, Johannes and Friedrich, Heiko and Seidel, Hans-Peter and Slusallek, Philipp},
  LANGUAGE  = {eng},
  ISSN      = {0178-2789},
  DOI       = {10.1007/s00371-006-0063-x},
  LOCALID   = {Local-ID: C125675300671F7B-D2422B791713DAFDC12571B00020F74E-guenther:06:IRTSA},
  PUBLISHER = {Springer International},
  ADDRESS   = {Berlin},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Recent high-performance ray tracing implementations have already achieved interactive performance on a single PC even for highly complex scenes. However, so far these approaches have been limited to mostly static scenes due to the large cost of updating the necessary spatial index structures after modifying scene geometry. In this paper we present an approach that avoids these updates almost completely for the case of skinned models as typically used in computer games. We assume that the characters are built from meshes with an underlying skeleton structure, where the set of joint angles defines the character's pose and determines the skinning parameters. Based on a sampling of the possible pose space we build a static fuzzy kd-tree for each skeleton segment in a fast preprocessing step. This fuzzy kd-trees are then organized in a top-level kd-tree. Together with the skeleton's affine transformations this multi-level kd-tree allows for fast and efficient scene traversal at runtime while arbitrary combinations of animation sequences can be applied interactively to the joint angles. We achieve real-time ray tracing performance of up to 15 frames per second at $1024 \times 1024$ resolution even on a single processor core.},
  JOURNAL   = {The Visual Computer},
  VOLUME    = {22},
  PAGES     = {785--792},
}
Endnote
%0 Journal Article %A G&#252;nther, Johannes %A Friedrich, Heiko %A Seidel, Hans-Peter %A Slusallek, Philipp %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Interactive Ray Tracing of Skinned Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-233B-0 %F EDOC: 314653 %F OTHER: Local-ID: C125675300671F7B-D2422B791713DAFDC12571B00020F74E-guenther:06:IRTSA %R 10.1007/s00371-006-0063-x %D 2006 %* Review method: peer-reviewed %X Recent high-performance ray tracing implementations have<br>already achieved interactive performance on a single PC even<br>for highly complex scenes. However, so far these approaches<br>have been limited to mostly static scenes due to the large<br>cost of updating the necessary spatial index structures<br>after modifying scene geometry. In this paper we present an<br>approach that avoids these updates almost completely for the<br>case of skinned models as typically used in computer games.<br>We assume that the characters are built from meshes with an<br>underlying skeleton structure, where the set of joint angles<br>defines the character's pose and determines the skinning<br>parameters. Based on a sampling of the possible pose space<br>we build a static fuzzy kd-tree for each skeleton segment in<br>a fast preprocessing step. This fuzzy kd-trees are then<br>organized in a top-level kd-tree. Together with the<br>skeleton's affine transformations this multi-level kd-tree<br>allows for fast and efficient scene traversal at runtime<br>while arbitrary combinations of animation sequences can be<br>applied interactively to the joint angles. We achieve<br>real-time ray tracing performance of up to 15 frames per<br>second at $1024 \times 1024$ resolution even on a single<br>processor core. 
%J The Visual Computer %V 22 %& 785 %P 785 - 792 %I Springer International %C Berlin %@ false %U https://rdcu.be/dH0vs
Günther, J., Friedrich, H., Wald, I., Seidel, H.-P., and Slusallek, P. 2006b. Ray Tracing Animated Scenes using Motion Decomposition. Computer Graphics Forum, European Association of Computer Graphics.
Abstract
Though ray tracing has recently become interactive, its high<br>precomputation time for building spatial indices usually<br>limits its applications to walkthroughs of static scenes.<br>This is a major limitation, as most applications demand<br>support for dynamically animated models. In this paper, we<br>present a new approach to ray trace a special but important<br>class of dynamic scenes, namely models whose connectivity<br>does not change over time and for which the space of all<br>possible poses is known in advance.<br><br>We support these kinds of models by introducing two new<br>concepts: primary motion decomposition, and fuzzy kd-trees.<br>We analyze the space of poses and break the model down into<br>submeshes with similar motion. For each of these submeshes<br>and for every time step, we calculate a best affine<br>transformation through a least square approach. Any<br>residual motion is then captured in a {\bf single} ``fuzzy<br>kd-tree'' for the entire animation.<br><br>Together, these techniques allow for ray tracing animations<br>{\em without} rebuilding the spatial index structures for<br>the submeshes, resulting in interactive frame rates of 5 to<br>15 fps even on a single CPU.
Export
BibTeX
@inproceedings{Gunther-et-al_EG06,
  TITLE     = {Ray Tracing Animated Scenes using Motion Decomposition},
  AUTHOR    = {G{\"u}nther, Johannes and Friedrich, Heiko and Wald, Ingo and Seidel, Hans-Peter and Slusallek, Philipp},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2006.00971.x},
  LOCALID   = {Local-ID: C125675300671F7B-56356643E0EEF34EC12571B00021FC07-guenther:06:modecomp},
  PUBLISHER = {European Association of Computer Graphics},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Though ray tracing has recently become interactive, its high precomputation time for building spatial indices usually limits its applications to walkthroughs of static scenes. This is a major limitation, as most applications demand support for dynamically animated models. In this paper, we present a new approach to ray trace a special but important class of dynamic scenes, namely models whose connectivity does not change over time and for which the space of all possible poses is known in advance. We support these kinds of models by introducing two new concepts: primary motion decomposition, and fuzzy kd-trees. We analyze the space of poses and break the model down into submeshes with similar motion. For each of these submeshes and for every time step, we calculate a best affine transformation through a least square approach. Any residual motion is then captured in a \textbf{single} ``fuzzy kd-tree'' for the entire animation. Together, these techniques allow for ray tracing animations \emph{without} rebuilding the spatial index structures for the submeshes, resulting in interactive frame rates of 5 to 15 fps even on a single CPU.},
  BOOKTITLE = {Eurographics 2006 Proceedings},
  EDITOR    = {Szirmay-Kalos, L{\'a}szl{\'o} and Gr{\"o}ller, Eduard},
  PAGES     = {517--525},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {25},
  ISSUE     = {3},
  ADDRESS   = {Vienna, Austria},
}
Endnote
%0 Conference Proceedings %A G&#252;nther, Johannes %A Friedrich, Heiko %A Wald, Ingo %A Seidel, Hans-Peter %A Slusallek, Philipp %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Ray Tracing Animated Scenes using Motion Decomposition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23CB-E %F EDOC: 314550 %F OTHER: Local-ID: C125675300671F7B-56356643E0EEF34EC12571B00021FC07-guenther:06:modecomp %R 10.1111/j.1467-8659.2006.00971.x %D 2006 %Z Review method: peer-reviewed %B Eurographics 2006 %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X Though ray tracing has recently become interactive, its high<br>precomputation time for building spatial indices usually<br>limits its applications to walkthroughs of static scenes.<br>This is a major limitation, as most applications demand<br>support for dynamically animated models. In this paper, we<br>present a new approach to ray trace a special but important<br>class of dynamic scenes, namely models whose connectivity<br>does not change over time and for which the space of all<br>possible poses is known in advance.<br><br>We support these kinds of models by introducing two new<br>concepts: primary motion decomposition, and fuzzy kd-trees.<br>We analyze the space of poses and break the model down into<br>submeshes with similar motion. For each of these submeshes<br>and for every time step, we calculate a best affine<br>transformation through a least square approach. Any<br>residual motion is then captured in a {\bf single} ``fuzzy<br>kd-tree'' for the entire animation.<br><br>Together, these techniques allow for ray tracing animations<br>{\em without} rebuilding the spatial index structures for<br>the submeshes, resulting in interactive frame rates of 5 to<br>15 fps even on a single CPU. 
%B Eurographics 2006 Proceedings %E Szirmay-Kalos, L&#225;szl&#243;; Gr&#246;ller, Eduard %P 517 - 525 %I European Association of Computer Graphics %J Computer Graphics Forum %V 25 %N 3 %I Blackwell-Wiley %@ false
Gall, J., Rosenhahn, B., Brox, T., and Seidel, H.-P. 2006a. Learning for Multi-view 3D Tracking in the Context of Particle Filters. Advances in Visual Computing (ISVC 2006), Springer.
Abstract
In this paper we present an approach to use prior knowledge in the particle <br>filter framework for 3D tracking, i.e. estimating the state parameters such as <br>joint angles of a 3D object. The probability of the object’s states, including <br>correlations between the state parameters, is learned a priori from training <br>samples. We introduce a framework that integrates this knowledge into the <br>family of particle filters and particularly into the annealed particle filter <br>scheme. Furthermore, we show that the annealed particle filter also works with <br>a variational model for level set based image segmentation that does not rely <br>on background subtraction and, hence, does not depend on a static background. <br>In our experiments, we use a four camera set-up for tracking the lower part of <br>a human body by a kinematic model with 18 degrees of freedom. We demonstrate <br>the increased accuracy due to the prior knowledge and the robustness of our <br>approach to image distortions. Finally, we compare the results of our <br>multi-view tracking system quantitatively to the outcome of an industrial <br>marker based tracking system.
Export
BibTeX
@inproceedings{Gall-et-al_ISVC06,
  TITLE     = {Learning for Multi-view {3D} Tracking in the Context of Particle Filters},
  AUTHOR    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Brox, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-48626-8},
  DOI       = {10.1007/11919629_7},
  LOCALID   = {Local-ID: C125675300671F7B-166E4030CB024E42C125722D0050F6E7-GallISVC2005},
  PUBLISHER = {Springer},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {In this paper we present an approach to use prior knowledge in the particle filter framework for 3D tracking, i.e. estimating the state parameters such as joint angles of a 3D object. The probability of the object's states, including correlations between the state parameters, is learned a priori from training samples. We introduce a framework that integrates this knowledge into the family of particle filters and particularly into the annealed particle filter scheme. Furthermore, we show that the annealed particle filter also works with a variational model for level set based image segmentation that does not rely on background subtraction and, hence, does not depend on a static background. In our experiments, we use a four camera set-up for tracking the lower part of a human body by a kinematic model with 18 degrees of freedom. We demonstrate the increased accuracy due to the prior knowledge and the robustness of our approach to image distortions. Finally, we compare the results of our multi-view tracking system quantitatively to the outcome of an industrial marker based tracking system.},
  BOOKTITLE = {Advances in Visual Computing (ISVC 2006)},
  EDITOR    = {Bebis, George and Boyle, Richard and Parvin, Bahram and Koracin, Darko and Remagnino, Paolo and Nefian, Ara and Meenakshisundaram, Gopi and Pascucci, Valerio and Zara, Jiri and Molineros, Jose and Theisel, Holger and Malzbender, Tom},
  PAGES     = {59--69},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4292},
  ADDRESS   = {Lake Tahoe, NV, USA},
}
Endnote
%0 Conference Proceedings %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Brox, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Learning for Multi-view 3D Tracking in the Context of Particle Filters : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2354-7 %F EDOC: 314442 %F OTHER: Local-ID: C125675300671F7B-166E4030CB024E42C125722D0050F6E7-GallISVC2005 %R 10.1007/11919629_7 %D 2006 %B Second International Symposium on Advances in Visual Computing %Z date of event: 2006-11-06 - 2006-11-08 %C Lake Tahoe, NV, USA %X In this paper we present an approach to use prior knowledge in the particle <br>filter framework for 3D tracking, i.e. estimating the state parameters such as <br>joint angles of a 3D object. The probability of the object&#8217;s states, including <br>correlations between the state parameters, is learned a priori from training <br>samples. We introduce a framework that integrates this knowledge into the <br>family of particle filters and particularly into the annealed particle filter <br>scheme. Furthermore, we show that the annealed particle filter also works with <br>a variational model for level set based image segmentation that does not rely <br>on background subtraction and, hence, does not depend on a static background. <br>In our experiments, we use a four camera set-up for tracking the lower part of <br>a human body by a kinematic model with 18 degrees of freedom. We demonstrate <br>the increased accuracy due to the prior knowledge and the robustness of our <br>approach to image distortions. Finally, we compare the results of our <br>multi-view tracking system quantitatively to the outcome of an industrial <br>marker based tracking system. 
%B Advances in Visual Computing %E Bebis, George; Boyle, Richard; Parvin, Bahram; Koracin, Darko; Remagnino, Paolo; Nefian, Ara; Meenakshisundaram, Gopi; Pascucci, Valerio; Zara, Jiri; Molineros, Jose; Theisel, Holger; Malzbender, Tom %P 59 - 69 %I Springer %@ 978-3-540-48626-8 %B Lecture Notes in Computer Science %N 4292 %U https://rdcu.be/dHM2U
Gall, J., Rosenhahn, B., and Seidel, H.-P. 2006b. Robust Pose Estimation with 3D Textured Models. Advances in Image and Video Technology (PSIVT 2006), Springer.
Abstract
Estimating the pose of a rigid body means to determine the rigid body motion in <br>the 3D space from 2D images. For this purpose, it is reasonable to make use of <br>existing knowledge of the object. Our approach exploits the 3D shape and the <br>texture of the tracked object in form of a 3D textured model to establish 3D-2D <br>correspondences for pose estimation. While the surface of the 3D free-form <br>model is matched to the contour extracted by segmentation, additional reliable <br>correspondences are obtained by matching local descriptors of interest points <br>between the textured model and the images. The fusion of these complementary <br>features provides a robust pose estimation. Moreover, the initial pose is <br>automatically detected and the pose is predicted for each frame. Using the <br>predicted pose as shape prior makes the contour extraction less sensitive. The <br>performance of our method is demonstrated by stereo tracking experiments.
Export
BibTeX
@inproceedings{Gall-et-al_PSIVT06,
  TITLE     = {Robust Pose Estimation with {3D} Textured Models},
  AUTHOR    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-68297-4},
  DOI       = {10.1007/11949534_9},
  LOCALID   = {Local-ID: C125675300671F7B-9DF1787D5A05525CC125724A005638AD-Gall2006c},
  PUBLISHER = {Springer},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Estimating the pose of a rigid body means to determine the rigid body motion in the 3D space from 2D images. For this purpose, it is reasonable to make use of existing knowledge of the object. Our approach exploits the 3D shape and the texture of the tracked object in form of a 3D textured model to establish 3D-2D correspondences for pose estimation. While the surface of the 3D free-form model is matched to the contour extracted by segmentation, additional reliable correspondences are obtained by matching local descriptors of interest points between the textured model and the images. The fusion of these complementary features provides a robust pose estimation. Moreover, the initial pose is automatically detected and the pose is predicted for each frame. Using the predicted pose as shape prior makes the contour extraction less sensitive. The performance of our method is demonstrated by stereo tracking experiments.},
  BOOKTITLE = {Advances in Image and Video Technology (PSIVT 2006)},
  EDITOR    = {Chang, Long-Wen and Lie, Wen-Nung},
  PAGES     = {84--95},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4319},
  ADDRESS   = {Hsinchu, Taiwan},
}
Endnote
%0 Conference Proceedings %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Robust Pose Estimation with 3D Textured Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-23E0-9 %F EDOC: 314370 %F OTHER: Local-ID: C125675300671F7B-9DF1787D5A05525CC125724A005638AD-Gall2006c %R 10.1007/11949534_9 %D 2006 %B First Pacific Rim Symposium %Z date of event: 2006-12-10 - 2006-12-13 %C Hsinchu, Taiwan %X Estimating the pose of a rigid body means to determine the rigid body motion in <br>the 3D space from 2D images. For this purpose, it is reasonable to make use of <br>existing knowledge of the object. Our approach exploits the 3D shape and the <br>texture of the tracked object in form of a 3D textured model to establish 3D-2D <br>correspondences for pose estimation. While the surface of the 3D free-form <br>model is matched to the contour extracted by segmentation, additional reliable <br>correspondences are obtained by matching local descriptors of interest points <br>between the textured model and the images. The fusion of these complementary <br>features provides a robust pose estimation. Moreover, the initial pose is <br>automatically detected and the pose is predicted for each frame. Using the <br>predicted pose as shape prior makes the contour extraction less sensitive. The <br>performance of our method is demonstrated by stereo tracking experiments. %B Advances in Image and Video Technology %E Chang, Long-Wen; Lie, Wen-Nung %P 84 - 95 %I Springer %@ 978-3-540-68297-4 %B Lecture Notes in Computer Science %N 4319 %U https://rdcu.be/dHMEr
Fuchs, C., Chen, T., Goesele, M., Theisel, H., and Seidel, H.-P. 2006. Volumetric Density Capture From a Single Image. Volume Graphics 2006 : Eurographics / IEEE VGTC Workshop Proceedings, Eurographics.
Export
BibTeX
@inproceedings{Fuchs-et-al_VGTC06,
  TITLE     = {Volumetric Density Capture From a Single Image},
  AUTHOR    = {Fuchs, Christian and Chen, Tongbo and Goesele, Michael and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-41-X},
  DOI       = {10.2312/VG/VG06/017-022},
  LOCALID   = {Local-ID: C125675300671F7B-00CF74A3FF1DC4CAC125722D005D09CD-Fuchs:2006:VDS},
  PUBLISHER = {Eurographics},
  YEAR      = {2006},
  DATE      = {2006},
  BOOKTITLE = {Volume Graphics 2006: Eurographics / IEEE VGTC Workshop Proceedings},
  EDITOR    = {M{\"o}ller, Torsten and Machiraju, Raghu and Chen, Min-Syan and Ertl, Thomas},
  PAGES     = {17--22},
  ADDRESS   = {Boston, MA, USA},
}
Endnote
%0 Conference Proceedings %A Fuchs, Christian %A Chen, Tongbo %A Goesele, Michael %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Volumetric Density Capture From a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2476-4 %F EDOC: 314545 %F OTHER: Local-ID: C125675300671F7B-00CF74A3FF1DC4CAC125722D005D09CD-Fuchs:2006:VDS %R 10.2312/VG/VG06/017-022 %D 2006 %B Fifth International Workshop on Volume Graphic %Z date of event: 2006-07-30 - 2006-07-31 %C Boston, MA, USA %B Volume Graphics 2006&#160;:&#160;Eurographics / IEEE VGTC Workshop Proceedings %E M&#246;ller, Torsten; Machiraju, Raghu; Chen, Min-Syan; Ertl, Thomas %P 17 - 22 %I Eurographics %@ 3-905673-41-X
Friedrich, H., Günther, J., Dietrich, A., Scherbaum, M., Seidel, H.-P., and Slusallek, P. 2006. Exploring the Use of Ray Tracing for Future Games. Sandbox ’06: Proceedings of the 2006 ACM SIGGRAPH Symposium on Videogames, ACM.
Abstract
Rasterization hardware and computer games have always been<br>tightly connected: The hardware implementation of<br>rasterization has made complex interactive 3D games possible<br>while requirements for future games drive the development of<br>increasingly parallel GPUs and CPUs. Interestingly, this<br>development -- together with important algorithmic<br>improvements -- also enabled \emph{ray tracing\/} to achieve<br>realtime performance recently.<br><br>In this paper we explore the opportunities offered by ray<br>tracing based game technology in the context of current and<br>expected future performance levels. In particular, we are<br>interested in simulation-based graphics that avoids<br>pre-computations and thus enables the interactive production<br>of advanced visual effects and increased realism necessary<br>for future games. In this context we analyze the advantages<br>of ray tracing and demonstrate first results from several<br>ray tracing based game projects. We also discuss ray<br>tracing API issues and present recent developments that<br>support interactions and dynamic scene content. We end with<br>an outlook on the different options for hardware<br>acceleration of ray tracing.
Export
BibTeX
@inproceedings{Friedrich-et-al_Sandbox06,
  TITLE     = {Exploring the Use of Ray Tracing for Future Games},
  AUTHOR    = {Friedrich, Heiko and G{\"u}nther, Johannes and Dietrich, Andreas and Scherbaum, Michael and Seidel, Hans-Peter and Slusallek, Philipp},
  LANGUAGE  = {eng},
  ISBN      = {1-59593-386-7},
  DOI       = {10.1145/1183316.1183323},
  LOCALID   = {Local-ID: C125675300671F7B-EA8AC8137DC507D1C125715A0028DF9F-friedrich:06:RTG},
  PUBLISHER = {ACM},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Rasterization hardware and computer games have always been tightly connected: The hardware implementation of rasterization has made complex interactive 3D games possible while requirements for future games drive the development of increasingly parallel GPUs and CPUs. Interestingly, this development -- together with important algorithmic improvements -- also enabled \emph{ray tracing\/} to achieve realtime performance recently. In this paper we explore the opportunities offered by ray tracing based game technology in the context of current and expected future performance levels. In particular, we are interested in simulation-based graphics that avoids pre-computations and thus enables the interactive production of advanced visual effects and increased realism necessary for future games. In this context we analyze the advantages of ray tracing and demonstrate first results from several ray tracing based game projects. We also discuss ray tracing API issues and present recent developments that support interactions and dynamic scene content. We end with an outlook on the different options for hardware acceleration of ray tracing.},
  BOOKTITLE = {Sandbox '06: Proceedings of the 2006 ACM SIGGRAPH Symposium on Videogames},
  PAGES     = {41--50},
  ADDRESS   = {Boston, MA, USA},
}
Endnote
%0 Conference Proceedings %A Friedrich, Heiko %A G&#252;nther, Johannes %A Dietrich, Andreas %A Scherbaum, Michael %A Seidel, Hans-Peter %A Slusallek, Philipp %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Exploring the Use of Ray Tracing for Future Games : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-22C9-9 %F EDOC: 314561 %F OTHER: Local-ID: C125675300671F7B-EA8AC8137DC507D1C125715A0028DF9F-friedrich:06:RTG %R 10.1145/1183316.1183323 %D 2006 %B 2006 ACM SIGGRAPH Cymposium on Videogames %Z date of event: 2006-07-30 - 2006-07-31 %C Boston, MA, USA %X Rasterization hardware and computer games have always been<br>tightly connected: The hardware implementation of<br>rasterization has made complex interactive 3D games possible<br>while requirements for future games drive the development of<br>increasingly parallel GPUs and CPUs. Interestingly, this<br>development -- together with important algorithmic<br>improvements -- also enabled \emph{ray tracing\/} to achieve<br>realtime performance recently.<br><br>In this paper we explore the opportunities offered by ray<br>tracing based game technology in the context of current and<br>expected future performance levels. In particular, we are<br>interested in simulation-based graphics that avoids<br>pre-computations and thus enables the interactive production<br>of advanced visual effects and increased realism necessary<br>for future games. In this context we analyze the advantages<br>of ray tracing and demonstrate first results from several<br>ray tracing based game projects. We also discuss ray<br>tracing API issues and present recent developments that<br>support interactions and dynamic scene content. We end with<br>an outlook on the different options for hardware<br>acceleration of ray tracing. 
%B Sandbox '06: Proceedings of the 2006 ACM SIGGRAPH Symposium on Videogames %P 41 - 50 %I ACM %@ 1-59593-386-7
Efremov, A., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006. Design and evaluation of backward compatible high dynamic range video compression. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this report we describe the details of the backward compatible high dynamic range (HDR) video compression algorithm. The algorithm is designed to facilitate a smooth transition from standard low dynamic range (LDR) video to high fidelity high dynamic range content. The HDR and the corresponding LDR video frames are decorrelated and then compressed into a single MPEG stream, which can be played on both existing DVD players and HDR-enabled devices.
Export
BibTeX
@techreport{EfremovMantiukMyszkowskiSeidel,
  TITLE       = {Design and evaluation of backward compatible high dynamic range video compression},
  AUTHOR      = {Efremov, Alexander and Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-001},
  NUMBER      = {MPI-I-2006-4-001},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2006},
  DATE        = {2006},
  ABSTRACT    = {In this report we describe the details of the backward compatible high dynamic range (HDR) video compression algorithm. The algorithm is designed to facilitate a smooth transition from standard low dynamic range (LDR) video to high fidelity high dynamic range content. The HDR and the corresponding LDR video frames are decorrelated and then compressed into a single MPEG stream, which can be played on both existing DVD players and HDR-enabled devices.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Efremov, Alexander %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Design and evaluation of backward compatible high dynamic range video compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6811-0 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-001 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 50 p. %X In this report we describe the details of the backward compatible high dynamic range (HDR) video compression algorithm. The algorithm is designed to facilitate a smooth transition from standard low dynamic range (LDR) video to high fidelity high dynamic range content. The HDR and the corresponding LDR video frames are decorrelated and then compressed into a single MPEG stream, which can be played on both existing DVD players and HDR-enabled devices. %B Research Report / Max-Planck-Institut f&#252;r Informatik
De Aguiar, E., Theobalt, C., and Seidel, H.-P. 2006a. Automatic Learning of Articulated Skeletons from 3D Marker Trajectories. Advances in Visual Computing (ISVC 2006), Springer.
Abstract
We present a novel fully-automatic approach for estimating an articulated <br>skeleton of a moving subject and its motion from body marker trajectories that <br>have been measured with an optical motion capture system. Our method does not <br>require a priori information about the shape and proportions of the tracked <br>subject, can be applied to arbitrary motion sequences, and renders dedicated <br>initialization poses unnecessary. To serve this purpose, our algorithm first <br>identifies individual rigid bodies by means of a variant of spectral <br>clustering. Thereafter, it determines joint positions at each time step of <br>motion through numerical optimization, reconstructs the skeleton topology, and <br>finally enforces fixed bone length constraints. Through experiments, we <br>demonstrate the robustness and efficiency of our algorithm and show that it <br>outperforms related methods from the literature in terms of accuracy and speed.
Export
BibTeX
@inproceedings{Theobalt-at-al_ISVC06,
  TITLE     = {Automatic Learning of Articulated Skeletons from {3D} Marker Trajectories},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-540-48628-3},
  DOI       = {10.1007/11919476_49},
  LOCALID   = {Local-ID: C125675300671F7B-3BC4918875803420C125725800556F43-deAguiar_isvc2006},
  PUBLISHER = {Springer},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {We present a novel fully-automatic approach for estimating an articulated skeleton of a moving subject and its motion from body marker trajectories that have been measured with an optical motion capture system. Our method does not require a priori information about the shape and proportions of the tracked subject, can be applied to arbitrary motion sequences, and renders dedicated initialization poses unnecessary. To serve this purpose, our algorithm first identifies individual rigid bodies by means of a variant of spectral clustering. Thereafter, it determines joint positions at each time step of motion through numerical optimization, reconstructs the skeleton topology, and finally enforces fixed bone length constraints. Through experiments, we demonstrate the robustness and efficiency of our algorithm and show that it outperforms related methods from the literature in terms of accuracy and speed.},
  BOOKTITLE = {Advances in Visual Computing (ISVC 2006)},
  EDITOR    = {Bebis, George and Boyle, Richard and Parvin, Bahram and Koracin, Darko and Remagnino, Paolo and Nefian, Ara and Meenakshisundaram, Gopi and Pascucci, Valerio and Zara, Jiri and Molineros, Jose and Theisel, Holger and Malzbender, Tom},
  PAGES     = {485--494},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {4291},
  ADDRESS   = {Lake Tahoe, NV, USA},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Learning of Articulated Skeletons from 3D Marker Trajectories : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2235-4 %F EDOC: 314596 %F OTHER: Local-ID: C125675300671F7B-3BC4918875803420C125725800556F43-deAguiar_isvc2006 %R 10.1007/11919476_49 %D 2006 %B Second International Symposium on Advances in Visual Computing %Z date of event: 2006-11-06 - 2006-11-08 %C Lake Tahoe, NV, USA %X We present a novel fully-automatic approach for estimating an articulated <br>skeleton of a moving subject and its motion from body marker trajectories that <br>have been measured with an optical motion capture system. Our method does not <br>require a priori information about the shape and proportions of the tracked <br>subject, can be applied to arbitrary motion sequences, and renders dedicated <br>initialization poses unnecessary. To serve this purpose, our algorithm first <br>identifies individual rigid bodies by means of a variant of spectral <br>clustering. Thereafter, it determines joint positions at each time step of <br>motion through numerical optimization, reconstructs the skeleton topology, and <br>finally enforces fixed bone length constraints. Through experiments, we <br>demonstrate the robustness and effciency of our algorithm and show that it <br>outperforms related methods from the literature in terms of accuracy and speed. 
%B Advances in Visual Computing %E Bebis, George; Boyle, Richard; Parvin, Bahram; Koracin, Darko; Remagnino, Paolo; Nefian, Ara; Meenakshisundaram, Gopi; Pascucci, Valerio; Zara, Jiri; Molineros, Jose; Theisel, Holger; Malzbender, Tom %P 485 - 494 %I Springer %@ 3-540-48628-3 %B Lecture Notes in Computer Science %N 4291 %U https://rdcu.be/dHMNL
De Aguiar, E., Zayer, R., Theobalt, C., Magnor, M.A., and Seidel, H.-P. 2006b. A framework for natural animation of digitized models. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present a novel versatile, fast and simple framework to generate high-quality animations of scanned human characters from input motion data. Our method is purely mesh-based and, in contrast to skeleton-based animation, requires only a minimum of manual interaction. The only manual step that is required to create moving virtual people is the placement of a sparse set of correspondences between triangles of an input mesh and triangles of the mesh to be animated. The proposed algorithm implicitly generates realistic body deformations, and can easily transfer motions between humans of different shape and proportions. It can handle different types of input data, e.g. other animated meshes and motion capture files, in just the same way. Finally, and most importantly, it creates animations at interactive frame rates. We feature two working prototype systems that demonstrate that our method can generate lifelike character animations from both marker-based and marker-less optical motion capture data.
Export
BibTeX
@techreport{deAguiarZayerTheobaltMagnorSeidel2006,
  TITLE       = {A framework for natural animation of digitized models},
  AUTHOR      = {de Aguiar, Edilson and Zayer, Rhaleb and Theobalt, Christian and Magnor, Marcus A. and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-003},
  NUMBER      = {MPI-I-2006-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2006},
  DATE        = {2006},
  ABSTRACT    = {We present a novel versatile, fast and simple framework to generate high-quality animations of scanned human characters from input motion data. Our method is purely mesh-based and, in contrast to skeleton-based animation, requires only a minimum of manual interaction. The only manual step that is required to create moving virtual people is the placement of a sparse set of correspondences between triangles of an input mesh and triangles of the mesh to be animated. The proposed algorithm implicitly generates realistic body deformations, and can easily transfer motions between humans of different shape and proportions. It can handle different types of input data, e.g. other animated meshes and motion capture files, in just the same way. Finally, and most importantly, it creates animations at interactive frame rates. We feature two working prototype systems that demonstrate that our method can generate lifelike character animations from both marker-based and marker-less optical motion capture data.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A de Aguiar, Edilson %A Zayer, Rhaleb %A Theobalt, Christian %A Magnor, Marcus A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A framework for natural animation of digitized models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-680B-F %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 27 p. %X We present a novel versatile, fast and simple framework to generate highquality animations of scanned human characters from input motion data. Our method is purely mesh-based and, in contrast to skeleton-based animation, requires only a minimum of manual interaction. The only manual step that is required to create moving virtual people is the placement of a sparse set of correspondences between triangles of an input mesh and triangles of the mesh to be animated. The proposed algorithm implicitly generates realistic body deformations, and can easily transfer motions between human erent shape and proportions. erent types of input data, e.g. other animated meshes and motion capture les, in just the same way. Finally, and most importantly, it creates animations at interactive frame rates. We feature two working prototype systems that demonstrate that our method can generate lifelike character animations from both marker-based and marker-less optical motion capture data. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Chen, T., Goesele, M., and Seidel, H.-P. 2006. Mesostructure from Specularity. 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2006), IEEE.
Abstract
We describe a simple and robust method for surface<br>mesostructure acquisition. Our method builds on the observation<br>that specular reflection is a reliable visual cue<br>for surface mesostructure perception. In contrast to most<br>photometric stereo methods, which take specularities as<br>outliers and discard them, we propose a progressive acquisition<br>system that captures a dense specularity field as<br>the only information for mesostructure reconstruction. Our<br>method can efficiently recover surfaces with fine-scale geometric<br>details from complex real-world objects with a wide<br>variety of reflection properties, including translucent, low<br>albedo, and highly specular objects. We show results for a<br>variety of objects including human skin, dried apricot, orange,<br>jelly candy, black leather and dark chocolate.
Export
BibTeX
@inproceedings{Chen2006MFS,
  TITLE     = {Mesostructure from Specularity},
  AUTHOR    = {Chen, Tongbo and Goesele, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2597-0},
  DOI       = {10.1109/CVPR.2006.182},
  LOCALID   = {Local-ID: C125675300671F7B-2E4D5B68F463C156C12571A300477D64-Chen:2006:MFS},
  PUBLISHER = {IEEE},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {We describe a simple and robust method for surface mesostructure acquisition. Our method builds on the observation that specular reflection is a reliable visual cue for surface mesostructure perception. In contrast to most photometric stereo methods, which take specularities as outliers and discard them, we propose a progressive acquisition system that captures a dense specularity field as the only information for mesostructure reconstruction. Our method can efficiently recover surfaces with fine-scale geometric details from complex real-world objects with a wide variety of reflection properties, including translucent, low albedo, and highly specular objects. We show results for a variety of objects including human skin, dried apricot, orange, jelly candy, black leather and dark chocolate.},
  BOOKTITLE = {2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2006)},
  PAGES     = {1825--1832},
  ADDRESS   = {New York, NY, USA},
}
Endnote
%0 Conference Proceedings %A Chen, Tongbo %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mesostructure from Specularity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2368-C %F EDOC: 314612 %F OTHER: Local-ID: C125675300671F7B-2E4D5B68F463C156C12571A300477D64-Chen:2006:MFS %R 10.1109/CVPR.2006.182 %D 2006 %B IEEE Computer Society Conference on Computer Vision and Pattern Recognition %Z date of event: 2006-06-17 - 2006-06-22 %C New York, NY, USA %X We describe a simple and robust method for surface<br>mesostructure acquisition. Our method builds on the observation<br>that specular reflection is a reliable visual cue<br>for surface mesostructure perception. In contrast to most<br>photometric stereo methods, which take specularities as<br>outliers and discard them, we propose a progressive acquisition<br>system that captures a dense specularity field as<br>the only information for mesostructure reconstruction. Our<br>method can efficiently recover surfaces with fine-scale geometric<br>details from complex real-world objects with a wide<br>variety of reflection properties, including translucent, low<br>albedo, and highly specular objects. We show results for a<br>variety of objects including human skin, dried apricot, orange,<br>jelly candy, black leather and dark chocolate. %B 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition %P 1825 - 1832 %I IEEE %@ 0-7695-2597-0
Brox, T., Rosenhahn, B., Cremers, D., and Seidel, H.-P. 2006. High Accuracy Optical Flow Serves 3-D Pose Tracking: Exploiting Contour and Flow Based Constraints. Computer Vision -- ECCV 2006, Springer.
Abstract
Tracking the 3-D pose of an object needs correspondences between 2-D features <br>in the image and their 3-D counterparts in the object model. A large variety of <br>such features has been suggested in the literature. All of them have drawbacks <br>in one situation or the other since their extraction in the image and/or the <br>matching is prone to errors. In this paper, we propose to use two complementary <br>types of features for pose tracking, such that one type makes up for the <br>shortcomings of the other. Aside from the object contour, which is matched to a <br>free-form object surface, we suggest to employ the optic flow in order to <br>compute additional point correspondences. Optic flow estimation is a mature <br>research field with sophisticated algorithms available. Using here a high <br>quality method ensures a reliable matching. In our experiments we demonstrate <br>the performance of our method and in particular the improvements due to the <br>optic flow. <br>We gratefully acknowledge funding by the German Research Foundation (DFG) and <br>the Max Planck Center for Visual Computing and Communication.
Export
BibTeX
@inproceedings{Brox-et-al_ECCV06,
  TITLE     = {High Accuracy Optical Flow Serves 3-D Pose Tracking: Exploiting Contour and Flow Based Constraints},
  AUTHOR    = {Brox, Thomas and Rosenhahn, Bodo and Cremers, Daniel and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-33834-5},
  DOI       = {10.1007/11744047_8},
  LOCALID   = {Local-ID: C125675300671F7B-4A30277A8F182676C125722D0051A663-RosenhahnECCV2006},
  PUBLISHER = {Springer},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {Tracking the 3-D pose of an object needs correspondences between 2-D features in the image and their 3-D counterparts in the object model. A large variety of such features has been suggested in the literature. All of them have drawbacks in one situation or the other since their extraction in the image and/or the matching is prone to errors. In this paper, we propose to use two complementary types of features for pose tracking, such that one type makes up for the shortcomings of the other. Aside from the object contour, which is matched to a free-form object surface, we suggest to employ the optic flow in order to compute additional point correspondences. Optic flow estimation is a mature research field with sophisticated algorithms available. Using here a high quality method ensures a reliable matching. In our experiments we demonstrate the performance of our method and in particular the improvements due to the optic flow. We gratefully acknowledge funding by the German Research Foundation (DFG) and the Max Planck Center for Visual Computing and Communication.},
  BOOKTITLE = {Computer Vision -- ECCV 2006},
  EDITOR    = {Leonardis, Ale{\v{s}} and Bischof, Horst and Pinz, Axel},
  PAGES     = {98--111},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {3952},
  ADDRESS   = {Graz, Austria},
}
Endnote
%0 Conference Proceedings %A Brox, Thomas %A Rosenhahn, Bodo %A Cremers, Daniel %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T High Accuracy Optical Flow Serves 3-D Pose Tracking: Exploiting Contour and Flow Based Constraints : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2312-A %F EDOC: 314597 %F OTHER: Local-ID: C125675300671F7B-4A30277A8F182676C125722D0051A663-RosenhahnECCV2006 %R 10.1007/11744047_8 %D 2006 %B 9th European Conference on Computer Vision %Z date of event: 2006-05-07 - 2006-05-13 %C Graz, Austria %X Tracking the 3-D pose of an object needs correspondences between 2-D features <br>in the image and their 3-D counterparts in the object model. A large variety of <br>such features has been suggested in the literature. All of them have drawbacks <br>in one situation or the other since their extraction in the image and/or the <br>matching is prone to errors. In this paper, we propose to use two complementary <br>types of features for pose tracking, such that one type makes up for the <br>shortcomings of the other. Aside from the object contour, which is matched to a <br>free-form object surface, we suggest to employ the optic flow in order to <br>compute additional point correspondences. Optic flow estimation is a mature <br>research field with sophisticated algorithms available. Using here a high <br>quality method ensures a reliable matching. In our experiments we demonstrate <br>the performance of our method and in particular the improvements due to the <br>optic flow. <br>We gratefully acknowledge funding by the German Research Foundation (DFG) and <br>the Max Planck Center for Visual Computing and Communication. 
%B Computer Vision -- ECCV 2006 %E Leonardis, Ales; Bischof, Horst; Pinz, Axel %P 98 - 111 %I Springer %@ 978-3-540-33834-5 %B Lecture Notes in Computer Science %N 3952 %U https://rdcu.be/dHSST
Blanz, V., Albrecht, I., Haber, J., and Seidel, H.-P. 2006a. Creating Face Models from Vague Mental Images. Computer Graphics Forum25, 3.
Export
BibTeX
@article{Blanz-et-al_CGF06,
  TITLE     = {Creating Face Models from Vague Mental Images},
  AUTHOR    = {Blanz, Volker and Albrecht, Irene and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2006.00984.x},
  LOCALID   = {Local-ID: C125675300671F7B-6B18A52A3F89B1BCC12571E7002B9B1B-BlaAlbHabSeid06},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2006},
  DATE      = {2006},
  JOURNAL   = {Computer Graphics Forum},
  EDITOR    = {Szirmay-Kalos, L{\'a}szl{\'o} and Gr{\"o}ller, Eduard},
  VOLUME    = {25},
  NUMBER    = {3},
  PAGES     = {645--654},
}
Endnote
%0 Journal Article %A Blanz, Volker %A Albrecht, Irene %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Creating Face Models from Vague Mental Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2279-E %F EDOC: 314418 %F OTHER: Local-ID: C125675300671F7B-6B18A52A3F89B1BCC12571E7002B9B1B-BlaAlbHabSeid06 %R 10.1111/j.1467-8659.2006.00984.x %D 2006 %* Review method: peer-reviewed %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 25 %N 3 %& 645 %P 645 - 654 %I Blackwell-Wiley %C Oxford %@ false
Blanz, V., Albrecht, I., Haber, J., and Seidel, H.-P. 2006b. Creating Face Models from Vague Mental Images. EUROGRAPHICS 2006, Blackwell.
Abstract
We present a novel approach to create plausible 3D face models from vague recollections or incomplete descriptions. This task plays an important role in police work, where composite facial images of suspects need to be created from vague descriptions given by the eyewitnesses of an incident. Our approach is based on a morphable model of 3D faces and takes into account correlations among facial features based on human anatomy and ethnicity. Using these correlations, unspecified parts of the target face are automatically completed to yield a coherent face model. The system uses a novel paradigm for navigating face space and provides high-level control of facial attributes as well as the possibility to import facial features from a database. In addition, the user can specify a set of attribute constraints that are used to restrict the target face to a residual subspace. These constraints can also be enforced on the example faces in the database, bringing their appearance closer to the mental image of the user, and thus avoiding confusing exposure to entirely different faces. We also propose a novel approach for adapting the system to local populations based on additional image databases that are converted into our 3D representation by automated shape reconstruction. We demonstrate the applicability of our system in a simulated forensic scenario and compare our results with those obtained by a professional forensic artist using state-of-the-art software for creating composite images in police work.
Export
BibTeX
@inproceedings{Albrecht_EG2006.pdf,
  TITLE     = {Creating Face Models from Vague Mental Images},
  AUTHOR    = {Blanz, Volker and Albrecht, Irene and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C12573CC004A8E26-AD6499C05009DA41C12573110044D43D-Albrecht_EG2006.pdf},
  PUBLISHER = {Blackwell},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {We present a novel approach to create plausible 3D face models from vague recollections or incomplete descriptions. This task plays an important role in police work, where composite facial images of suspects need to be created from vague descriptions given by the eyewitnesses of an incident. Our approach is based on a morphable model of 3D faces and takes into account correlations among facial features based on human anatomy and ethnicity. Using these correlations, unspecified parts of the target face are automatically completed to yield a coherent face model. The system uses a novel paradigm for navigating face space and provides high-level control of facial attributes as well as the possibility to import facial features from a database. In addition, the user can specify a set of attribute constraints that are used to restrict the target face to a residual subspace. These constraints can also be enforced on the example faces in the database, bringing their appearance closer to the mental image of the user, and thus avoiding confusing exposure to entirely different faces. We also propose a novel approach for adapting the system to local populations based on additional image databases that are converted into our 3D representation by automated shape reconstruction. We demonstrate the applicability of our system in a simulated forensic scenario and compare our results with those obtained by a professional forensic artist using state-of-the-art software for creating composite images in police work.},
  BOOKTITLE = {EUROGRAPHICS 2006},
  EDITOR    = {Gr{\"o}ller, Eduard and Szirmay-Kalos, L{\'a}szl{\'o}},
  PAGES     = {645--654},
  SERIES    = {Computer Graphics Forum},
}
Endnote
%0 Conference Proceedings %A Blanz, Volker %A Albrecht, Irene %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Creating Face Models from Vague Mental Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-248B-5 %F EDOC: 356540 %F OTHER: Local-ID: C12573CC004A8E26-AD6499C05009DA41C12573110044D43D-Albrecht_EG2006.pdf %I Blackwell %D 2006 %B Untitled Event %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X We present a novel approach to create plausible 3D face models from vague recollections or incomplete descriptions. This task plays an important role in police work, where composite facial images of suspects need to be created from vague descriptions given by the eyewitnesses of an incident. Our approach is based on a morphable model of 3D faces and takes into account correlations among facial features based on human anatomy and ethnicity. Using these correlations, unspeci?ed parts of the target face are automatically completed to yield a coherent face model. The system uses a novel paradigm for navigating face space and provides high-level control of facial attributes as well as the possibility to import facial features from a database. In addition, the user can specify a set of attribute constraints that are used to restrict the target face to a residual subspace. These constraints can also be enforced on the example faces in the database, bringing their appearance closer to the mental image of the user, and thus avoiding confusing exposure to entirely different faces. We also propose a novel approach for adapting the system to local populations based on additional image databases that are converted into our 3D representation by automated shape reconstruction. 
We demonstrate the applicability of our system in a simulated forensic scenario and compare our results with those obtained by a professional forensic artist using state-of-the-art software for creating composite images in police work. %B EUROGRAPHICS 2006 %E Gr&#246;ller, Eduard; Szirmay-Kalos, L&#225;szl&#243; %P 645 - 654 %I Blackwell %B Computer Graphics Forum
Belyaev, A., Langer, T., and Seidel, H.-P. 2006a. Mean value coordinates for arbitrary spherical polygons and polyhedra in $\mathbb{R}^{3}$. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Since their introduction, mean value coordinates enjoy ever increasing popularity in computer graphics and computational mathematics because they exhibit a variety of good properties. Most importantly, they are defined in the whole plane which allows interpolation and extrapolation without restrictions. Recently, mean value coordinates were generalized to spheres and to $\mathbb{R}^{3}$. We show that these spherical and 3D mean value coordinates are well-defined on the whole sphere and the whole space $\mathbb{R}^{3}$, respectively.
Export
BibTeX
@techreport{BelyaevLangerSeidel2006,
  TITLE       = {Mean value coordinates for arbitrary spherical polygons and polyhedra in {$\mathbb{R}^{3}$}},
  AUTHOR      = {Belyaev, Alexander and Langer, Torsten and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-010},
  NUMBER      = {MPI-I-2006-4-010},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2006},
  DATE        = {2006},
  ABSTRACT    = {Since their introduction, mean value coordinates enjoy ever increasing popularity in computer graphics and computational mathematics because they exhibit a variety of good properties. Most importantly, they are defined in the whole plane which allows interpolation and extrapolation without restrictions. Recently, mean value coordinates were generalized to spheres and to $\mathbb{R}^{3}$. We show that these spherical and 3D mean value coordinates are well-defined on the whole sphere and the whole space $\mathbb{R}^{3}$, respectively.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Belyaev, Alexander %A Langer, Torsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mean value coordinates for arbitrary spherical polygons and polyhedra in $\mathbb{R}^{3}$ : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-671C-2 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-010 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 19 p. %X Since their introduction, mean value coordinates enjoy ever increasing popularity in computer graphics and computational mathematics because they exhibit a variety of good properties. Most importantly, they are defined in the whole plane which allows interpolation and extrapolation without restrictions. Recently, mean value coordinates were generalized to spheres and to $\mathbb{R}^{3}$. We show that these spherical and 3D mean value coordinates are well-defined on the whole sphere and the whole space $\mathbb{R}^{3}$, respectively. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Belyaev, A., Yoshizawa, S., and Seidel, H.-P. 2006b. Skeleton-driven Laplacian Mesh Deformations. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this report, a new free-form shape deformation approach is proposed. We combine a skeleton-driven mesh deformation technique with discrete differential coordinates in order to create natural-looking global shape deformations. Given a triangle mesh, we first extract a skeletal mesh, a two-sided Voronoi-based approximation of the medial axis. Next the skeletal mesh is modified by free-form deformations. Then a desired global shape deformation is obtained by reconstructing the shape corresponding to the deformed skeletal mesh. The reconstruction is based on using discrete differential coordinates. Our method preserves fine geometric details and original shape thickness because of using discrete differential coordinates and skeleton-driven deformations. We also develop a new mesh evolution technique which allow us to eliminate possible global and local self-intersections of the deformed mesh while preserving fine geometric details. Finally, we present a multiresolution version of our approach in order to simplify and accelerate the deformation process.
Export
BibTeX
@techreport{BelyaevSeidelShin2006,
  TITLE       = {Skeleton-driven {Laplacian} Mesh Deformations},
  AUTHOR      = {Belyaev, Alexander and Yoshizawa, Shin and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-005},
  NUMBER      = {MPI-I-2006-4-005},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2006},
  DATE        = {2006},
  ABSTRACT    = {In this report, a new free-form shape deformation approach is proposed. We combine a skeleton-driven mesh deformation technique with discrete differential coordinates in order to create natural-looking global shape deformations. Given a triangle mesh, we first extract a skeletal mesh, a two-sided Voronoi-based approximation of the medial axis. Next the skeletal mesh is modified by free-form deformations. Then a desired global shape deformation is obtained by reconstructing the shape corresponding to the deformed skeletal mesh. The reconstruction is based on using discrete differential coordinates. Our method preserves fine geometric details and original shape thickness because of using discrete differential coordinates and skeleton-driven deformations. We also develop a new mesh evolution technique which allow us to eliminate possible global and local self-intersections of the deformed mesh while preserving fine geometric details. Finally, we present a multiresolution version of our approach in order to simplify and accelerate the deformation process.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Belyaev, Alexander %A Yoshizawa, Shin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Skeleton-driven Laplacian Mesh Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-67FF-6 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-005 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 37 p. %X In this report, a new free-form shape deformation approach is proposed. We combine a skeleton-driven mesh deformation technique with discrete differential coordinates in order to create natural-looking global shape deformations. Given a triangle mesh, we first extract a skeletal mesh, a two-sided Voronoi-based approximation of the medial axis. Next the skeletal mesh is modified by free-form deformations. Then a desired global shape deformation is obtained by reconstructing the shape corresponding to the deformed skeletal mesh. The reconstruction is based on using discrete differential coordinates. Our method preserves fine geometric details and original shape thickness because of using discrete differential coordinates and skeleton-driven deformations. We also develop a new mesh evolution technique which allow us to eliminate possible global and local self-intersections of the deformed mesh while preserving fine geometric details. Finally, we present a multiresolution version of our approach in order to simplify and accelerate the deformation process. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Bargmann, R., Blanz, V., and Seidel, H.-P. 2006. Learning-Based Facial Rearticulation Using Streams of 3D Scans. The 14th Pacific Conference on Computer Graphics and Applications, National Taiwan University.
Abstract
In this paper, we present a new approach that generates synthetic mouth articulations from an audio file and that transfers them to different face meshes. It is based on learning articulations from a stream of 3D scans of a real person acquired by a structured light scanner at 40 three-dimensional frames per second. Correspondence between these scans over several speech sequences is established via optical flow. We propose a novel type of Principal Component Analysis that considers variances only in a sub-region of the face, while retaining the full dimensionality of the original vector space of sample scans. Audio is recorded at the same time, so the head scans can be synchronized with phoneme and viseme information for computing viseme clusters. Given a new audio sequence along with text data, we are able to quickly create in a fully automated fashion an animation synchronized with that new sentence by morphing between the visemes along a path in viseme-space. The methods described in the paper include an automated process for data analysis in streams of 3D scans, and a framework that connects the system to existing static face modeling technology for articulation transfer.
Export
BibTeX
@inproceedings{Bargmann2005,
  TITLE     = {Learning-Based Facial Rearticulation Using Streams of {3D} Scans},
  AUTHOR    = {Bargmann, Robert and Blanz, Volker and Seidel, Hans-Peter},
  EDITOR    = {Chen, Bing-Yu},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-A5BB7F7B58C3D91FC125723A002F550A-Bargmann2005},
  PUBLISHER = {National Taiwan University},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {In this paper, we present a new approach that generates synthetic mouth articulations from an audio file and that transfers them to different face meshes. It is based on learning articulations from a stream of 3D scans of a real person acquired by a structured light scanner at 40 three-dimensional frames per second. Correspondence between these scans over several speech sequences is established via optical flow. We propose a novel type of Principal Component Analysis that considers variances only in a sub-region of the face, while retaining the full dimensionality of the original vector space of sample scans. Audio is recorded at the same time, so the head scans can be synchronized with phoneme and viseme information for computing viseme clusters. Given a new audio sequence along with text data, we are able to quickly create in a fully automated fashion an animation synchronized with that new sentence by morphing between the visemes along a path in viseme-space. The methods described in the paper include an automated process for data analysis in streams of 3D scans, and a framework that connects the system to existing static face modeling technology for articulation transfer.},
  BOOKTITLE = {The 14th Pacific Conference on Computer Graphics and Applications},
  PAGES     = {232--241},
}
Endnote
%0 Conference Proceedings %A Bargmann, Robert %A Blanz, Volker %A Seidel, Hans-Peter %E Chen, Bing-Yu %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning-Based Facial Rearticulation Using Streams of 3D Scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2352-B %F EDOC: 314524 %F OTHER: Local-ID: C125675300671F7B-A5BB7F7B58C3D91FC125723A002F550A-Bargmann2005 %I National Taiwan University %D 2006 %B Untitled Event %Z date of event: 2006-10-11 - %C Taipei, Taiwan %X In this paper, we present a new approach that generates synthetic mouth articulations from an audio file and that transfers them to different face meshes. It is based on learning articulations from a stream of 3D scans of a real person acquired by a structured light scanner at 40 three-dimensional frames per second. Correspondence between these scans over several speech sequences is established via optical flow. We propose a novel type of Principal Component Analysis that considers variances only in a sub-region of the face, while retaining the full dimensionality of the original vector space of sample scans. Audio is recorded at the same time, so the head scans can be synchronized with phoneme and viseme information for computing viseme clusters. Given a new audio sequence along with text data, we are able to quickly create in a fully automated fashion an animation synchronized with that new sentence by morphing between the visemes along a path in viseme-space. The methods described in the paper include an automated process for data analysis in streams of 3D scans, and a framework that connects the system to existing static face modeling technology for articulation transfer. %B The 14th Pacific Conference on Computer Graphics and Applications %P 232 - 241 %I National Taiwan University
Annen, T., Matusik, W., Zwicker, M., Pfister, H., and Seidel, H.-P. 2006. Distributed Rendering for Multiview Parallax Displays. Proceedings of Stereoscopic Displays and Virtual Reality Systems XIII, SPIE.
Abstract
3D display technology holds great promise for the future of television, virtual reality, entertainment, and visualization. Multiview parallax displays deliver stereoscopic views without glasses to arbitrary positions within the viewing zone. These systems must include a high-performance and scalable 3D rendering subsystem in order to generate multiple views at real-time frame rates. This paper describes a distributed rendering system for large-scale multiview parallax displays built with a network of PCs, commodity graphics accelerators, multiple projectors, and multiview screens. The main challenge is to render various perspective views of the scene and assign rendering tasks effectively. In this paper we investigate two different approaches: Optical multiplexing for lenticular screens and software multiplexing for parallax-barrier displays. We describe the construction of largescale multi-projector 3D display systems using lenticular and parallax-barrier technology. We have developed different distributed rendering algorithms using the Chromium stream-processing framework and evaluate the trade-offs and performance bottlenecks. Our results show that Chromium is well suited for interactive rendering on multiview parallax displays.
Export
BibTeX
@inproceedings{Annen:SPIE:2006,
  TITLE     = {Distributed Rendering for Multiview Parallax Displays},
  AUTHOR    = {Annen, Thomas and Matusik, Wojciech and Zwicker, Matthias and Pfister, Hanspeter and Seidel, Hans-Peter},
  EDITOR    = {Woods, Andrew J. and Dodgson, Neil A. and Merritt, John O. and Bolas, Mark T. and McDowall, Ian E.},
  LANGUAGE  = {eng},
  ISBN      = {0-8194-6095-8},
  LOCALID   = {Local-ID: C125675300671F7B-2539C4BE55A0B69FC125712A0048AA71-Annen:SPIE:2006},
  PUBLISHER = {SPIE},
  YEAR      = {2006},
  DATE      = {2006},
  ABSTRACT  = {3D display technology holds great promise for the future of television, virtual reality, entertainment, and visualization. Multiview parallax displays deliver stereoscopic views without glasses to arbitrary positions within the viewing zone. These systems must include a high-performance and scalable 3D rendering subsystem in order to generate multiple views at real-time frame rates. This paper describes a distributed rendering system for large-scale multiview parallax displays built with a network of PCs, commodity graphics accelerators, multiple projectors, and multiview screens. The main challenge is to render various perspective views of the scene and assign rendering tasks effectively. In this paper we investigate two different approaches: Optical multiplexing for lenticular screens and software multiplexing for parallax-barrier displays. We describe the construction of largescale multi-projector 3D display systems using lenticular and parallax-barrier technology. We have developed different distributed rendering algorithms using the Chromium stream-processing framework and evaluate the trade-offs and performance bottlenecks. Our results show that Chromium is well suited for interactive rendering on multiview parallax displays.},
  BOOKTITLE = {Proceedings of Stereoscopic Displays and Virtual Reality Systems XIII},
  PAGES     = {231--240},
  SERIES    = {SPIE},
}
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Matusik, Wojciech %A Zwicker, Matthias %A Pfister, Hanspeter %A Seidel, Hans-Peter %E Woods, Andrew J. %E Dodgson, Neil A. %E Merritt, John O. %E Bolas, Mark T. %E McDowall, Ian E. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Distributed Rendering for Multiview Parallax Displays : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-229F-9 %F EDOC: 314438 %F OTHER: Local-ID: C125675300671F7B-2539C4BE55A0B69FC125712A0048AA71-Annen:SPIE:2006 %I SPIE %D 2006 %B Untitled Event %Z date of event: 2006-01-15 - %C San Jose, USA %X 3D display technology holds great promise for the future of television, virtual reality, entertainment, and visualization. Multiview parallax displays deliver stereoscopic views without glasses to arbitrary positions within the viewing zone. These systems must include a high-performance and scalable 3D rendering subsystem in order to generate multiple views at real-time frame rates. This paper describes a distributed rendering system for large-scale multiview parallax displays built with a network of PCs, commodity graphics accelerators, multiple projectors, and multiview screens. The main challenge is to render various perspective views of the scene and assign rendering tasks effectively. In this paper we investigate two different approaches: Optical multiplexing for lenticular screens and software multiplexing for parallax-barrier displays. We describe the construction of largescale multi-projector 3D display systems using lenticular and parallax-barrier technology. We have developed different distributed rendering algorithms using the Chromium stream-processing framework and evaluate the trade-offs and performance bottlenecks. Our results show that Chromium is well suited for interactive rendering on multiview parallax displays. 
%B Proceedings of Stereoscopic Displays and Virtual Reality Systems XIII %P 231 - 240 %I SPIE %@ 0-8194-6095-8 %B SPIE
Albrecht, I., Kipp, M., Neff, M.P., and Seidel, H.-P. 2006. Gesture modeling and animation by imitation. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Animated characters that move and gesticulate appropriately with spoken text are useful in a wide range of applications. Unfortunately, they are very difficult to generate, even more so when a unique, individual movement style is required. We present a system that is capable of producing full-body gesture animation for given input text in the style of a particular performer. Our process starts with video of a performer whose gesturing style we wish to animate. A tool-assisted annotation process is first performed on the video, from which a statistical model of the person's particular gesturing style is built. Using this model and tagged input text, our generation algorithm creates a gesture script appropriate for the given text. As opposed to isolated singleton gestures, our gesture script specifies a stream of continuous gestures coordinated with speech. This script is passed to an animation system, which enhances the gesture description with more detail and prepares a refined description of the motion. An animation subengine can then generate either kinematic or physically simulated motion based on this description. The system is capable of creating animation that replicates a particular performance in the video corpus, generating new animation for the spoken text that is consistent with the given performer's style and creating performances of a given text sample in the style of different performers.
Export
BibTeX
@techreport{AlbrechtKippNeffSeidel2006,
  TITLE       = {Gesture modeling and animation by imitation},
  AUTHOR      = {Albrecht, Irene and Kipp, Michael and Neff, Michael Paul and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-008},
  NUMBER      = {MPI-I-2006-4-008},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2006},
  DATE        = {2006},
  ABSTRACT    = {Animated characters that move and gesticulate appropriately with spoken text are useful in a wide range of applications. Unfortunately, they are very difficult to generate, even more so when a unique, individual movement style is required. We present a system that is capable of producing full-body gesture animation for given input text in the style of a particular performer. Our process starts with video of a performer whose gesturing style we wish to animate. A tool-assisted annotation process is first performed on the video, from which a statistical model of the person's particular gesturing style is built. Using this model and tagged input text, our generation algorithm creates a gesture script appropriate for the given text. As opposed to isolated singleton gestures, our gesture script specifies a stream of continuous gestures coordinated with speech. This script is passed to an animation system, which enhances the gesture description with more detail and prepares a refined description of the motion. An animation subengine can then generate either kinematic or physically simulated motion based on this description. The system is capable of creating animation that replicates a particular performance in the video corpus, generating new animation for the spoken text that is consistent with the given performer's style and creating performances of a given text sample in the style of different performers.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Albrecht, Irene %A Kipp, Michael %A Neff, Michael Paul %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Multimodal Computing and Interaction Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gesture modeling and animation by imitation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6979-2 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-008 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2006 %P 62 p. %X Animated characters that move and gesticulate appropriately with spoken text are useful in a wide range of applications. Unfortunately, they are very difficult to generate, even more so when a unique, individual movement style is required. We present a system that is capable of producing full-body gesture animation for given input text in the style of a particular performer. Our process starts with video of a performer whose gesturing style we wish to animate. A tool-assisted annotation process is first performed on the video, from which a statistical model of the person.s particular gesturing style is built. Using this model and tagged input text, our generation algorithm creates a gesture script appropriate for the given text. As opposed to isolated singleton gestures, our gesture script specifies a stream of continuous gestures coordinated with speech. This script is passed to an animation system, which enhances the gesture description with more detail and prepares a refined description of the motion. An animation subengine can then generate either kinematic or physically simulated motion based on this description. 
The system is capable of creating animation that replicates a particular performance in the video corpus, generating new animation for the spoken text that is consistent with the given performer.s style and creating performances of a given text sample in the style of different performers. %B Research Report / Max-Planck-Institut f&#252;r Informatik
2005
Ziegler, G., Magnor, M., and Seidel, H.-P. 2005. GeoCast: Unifying Depth Video with Camera Meta-Data. 2nd Workshop on Immersive Communication and Broadcast Systems, Heinrich-Hertz-Institute.
Abstract
We present a storage format for placing multiple, dynamic 2.5D point sample streams (e.g. RGBZ video) or color video projections into the context of a common world space. The approach utilizes the concept of projective geometry to let virtual projectors "cast" the data from where it was originally recorded by the involved cameras. We exemplify the data format's versatility by demonstrating how several moving cameras reproduce 3D geometry exported from modeling software, and outline the extension to real-world acquisition. We also explain how this format can act as common interchange format for the camera parameters of lightfield/multi-view video footage.
Export
BibTeX
@inproceedings{Ziegler2004,
  TITLE     = {{GeoCast}: Unifying Depth Video with Camera Meta-Data},
  AUTHOR    = {Ziegler, Gernot and Magnor, Marcus and Seidel, Hans-Peter},
  EDITOR    = {Weigel, Christian and Sch{\"u}bel, Peter and Harezlak, Daniel Franciszek},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-86E217591A5FE3F9C12570B40039EEE3-Ziegler2004},
  PUBLISHER = {Heinrich-Hertz-Institute},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We present a storage format for placing multiple, dynamic 2.5D point sample streams (e.g. RGBZ video) or color video projections into the context of a common world space. The approach utilizes the concept of projective geometry to let virtual projectors "cast" the data from where it was originally recorded by the involved cameras. We exemplify the data format's versatility by demonstrating how several moving cameras reproduce 3D geometry exported from modeling software, and outline the extension to real-world acquisition. We also explain how this format can act as common interchange format for the camera parameters of lightfield/multi-view video footage.},
  BOOKTITLE = {2nd Workshop on Immersive Communication and Broadcast Systems},
  SERIES    = {Conference CD},
}
Endnote
%0 Conference Proceedings %A Ziegler, Gernot %A Magnor, Marcus %A Seidel, Hans-Peter %E Weigel, Christian %E Sch&#252;bel, Peter %E Harezlak, Daniel Franciszek %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T GeoCast: Unifying Depth Video with Camera Meta-Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26AA-D %F EDOC: 278950 %F OTHER: Local-ID: C125675300671F7B-86E217591A5FE3F9C12570B40039EEE3-Ziegler2004 %I Heinrich-Hertz-Institute %D 2005 %B Untitled Event %Z date of event: 2005-10-27 - %C Berlin, Germany %X We present a storage format for placing multiple, dynamic 2.5D point sample streams (e.g. RGBZ video) or color video projections into the context of a common world space. The approach utilizes the concept of projective geometry to let virtual projectors "cast" the data from where it was originally recorded by the involved cameras. We exemplify the data format's versatility by demonstrating how several moving cameras reproduce 3D geometry exported from modeling software, and outline the extension to real-world acquisition. We also explain how this format can act as common interchange format for the camera parameters of lightfield/multi-view video footage. %B 2nd Workshop on Immersive Communication and Broadcast Systems %P - 4 %I Heinrich-Hertz-Institute %B Conference CD
Zayer, R., Rössl, C., and Seidel, H.-P. 2005a. Variations on Angle Based Flattening. In: Advances in Multiresolution for Geometric Modelling. Springer.
Export
BibTeX
@incollection{DBLP:books/sp/05/ZayerRS05,
  TITLE     = {Variations on Angle Based Flattening},
  AUTHOR    = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1612-3786},
  ISBN      = {978-3-540-21462-5},
  DOI       = {10.1007/3-540-26808-1_10},
  PUBLISHER = {Springer},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {Advances in Multiresolution for Geometric Modelling},
  EDITOR    = {Dodgson, Neil A. and Floater, Michael S. and Sabin, Malcolm A.},
  PAGES     = {187--199},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Variations on Angle Based Flattening : %G eng %U http://hdl.handle.net/21.11116/0000-000F-2944-7 %R 10.1007/3-540-26808-1_10 %D 2005 %B Advances in Multiresolution for Geometric Modelling %E Dodgson, Neil A.; Floater, Michael S.; Sabin, Malcolm A. %P 187 - 199 %I Springer %@ 978-3-540-21462-5 %S Mathematics and Visualization %@ false %U https://rdcu.be/dEZx3
Zayer, R., Rössl, C., Karni, Z., and Seidel, H.-P. 2005b. Harmonic Guidance for Surface Deformation. Computer Graphics Forum (Proc. EUROGRAPHICS 2005), Blackwell.
Abstract
We present an interactive method for applying deformations to a surface mesh while preserving its global shape and local properties. Two surface editing scenarios are discussed, which conceptually differ in the specification of deformations: Either interpolation constraints are imposed explicitly, e.g., by dragging a subset of vertices, or, deformation of a reference surface is mimicked. The contribution of this paper is a novel approach for interpolation of local deformations over the manifold and for efficiently establishing correspondence to a reference surface from only few pairs of markers. As a general tool for both scenarios, a harmonic field is constructed to guide the interpolation of constraints and to find correspondence required for deformation transfer. We show that our approach fits nicely in a unified mathematical framework, where the same type of linear operator is applied in all phases, and how this approach can be used to create an intuitive and interactive editing tool.
Export
BibTeX
@inproceedings{Zayer-et-al_EUROGRAPHICS05,
  TITLE     = {Harmonic Guidance for Surface Deformation},
  AUTHOR    = {Zayer, Rhaleb and R{\"o}ssl, Christian and Karni, Zachi and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2005.00885.x},
  LOCALID   = {Local-ID: C125675300671F7B-B0426666CAE4649CC1256FE2003238CF-zayer:hgsd:2005},
  PUBLISHER = {Blackwell},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We present an interactive method for applying deformations to a surface mesh while preserving its global shape and local properties. Two surface editing scenarios are discussed, which conceptually differ in the specification of deformations: Either interpolation constraints are imposed explicitly, e.g., by dragging a subset of vertices, or, deformation of a reference surface is mimicked. The contribution of this paper is a novel approach for interpolation of local deformations over the manifold and for efficiently establishing correspondence to a reference surface from only few pairs of markers. As a general tool for both scenarios, a harmonic field is constructed to guide the interpolation of constraints and to find correspondence required for deformation transfer. We show that our approach fits nicely in a unified mathematical framework, where the same type of linear operator is applied in all phases, and how this approach can be used to create an intuitive and interactive editing tool.},
  BOOKTITLE = {The European Association for Computer Graphics 26th Annual Conference: EUROGRAPHICS 2005},
  EDITOR    = {Alexa, Marc and Marks, Joe},
  PAGES     = {601--609},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {24},
  NUMBER    = {3},
  ADDRESS   = {Dublin, Ireland},
}
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Karni, Zachi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Harmonic Guidance for Surface Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26B4-6 %F EDOC: 278954 %F OTHER: Local-ID: C125675300671F7B-B0426666CAE4649CC1256FE2003238CF-zayer:hgsd:2005 %R 10.1111/j.1467-8659.2005.00885.x %D 2005 %B The European Association for Computer Graphics 26th Annual Conference %Z date of event: 2005-08-29 - %C Dublin, Ireland %X We present an interactive method for applying deformations to a<br> surface mesh while preserving its global shape and local properties.<br> Two surface editing scenarios are discussed, which conceptually<br> differ in the specification of deformations:<br> Either interpolation constraints are imposed explicitly, e.g., by<br> dragging a subset of vertices, or, deformation of a reference<br> surface is mimicked.<br><br> The contribution of this paper is a novel approach for interpolation<br> of local deformations over the manifold and for efficiently<br> establishing correspondence to a reference surface from only few<br> pairs of markers.<br> As a general tool for both scenarios, a harmonic field is<br> constructed to guide the interpolation of constraints and to find<br> correspondence required for deformation transfer.<br> We show that our approach fits nicely in a unified mathematical<br> framework, where the same type of linear operator is applied in all<br> phases, and how this approach can be used to create an intuitive and<br> interactive editing tool. 
%B The European Association for Computer Graphics 26th Annual Conference: EUROGRAPHICS 2005 %E Alexa, Marc; Marks, Joe %P 601 - 609 %I Blackwell %J Computer Graphics Forum %V 24 %N 3 %I Blackwell-Wiley %@ false
Zayer, R., Rössl, C., and Seidel, H.-P. 2005c. Setting the Boundary Free: A Composite Approach to Surface Parameterization. Proceedings of the third Eurographics Symposium on Geometry Processing (SGP 2005), Eurographics.
Abstract
In the last decade, surface mesh parameterization has emerged as a standard technique in computer graphics. The ever increasing need for processing large and highly detailed data sets fosters the development of efficient parameterization techniques that can capture the geometry of the input meshes and produce low distortion planar maps. We present a set of novel techniques allowing for low distortion parameterization. In particular, we address one of the major shortcomings of linear methods by allowing the parametric representation to evolve freely on the plane without any fixed boundary vertices. Our method consists of several simple steps, each solving a linear problem. Our results exhibit a fair balance between high-quality and computational efficiency.
Export
BibTeX
@inproceedings{Zayer-et-al_SGP05,
  TITLE     = {Setting the Boundary Free: A Composite Approach to Surface Parameterization},
  AUTHOR    = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.2312/SGP/SGP05/091-100},
  LOCALID   = {Local-ID: C125675300671F7B-245A753A890F1A95C1257027004DC836-zayer:bf:2005},
  PUBLISHER = {Eurographics},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {In the last decade, surface mesh parameterization has emerged as a standard technique in computer graphics. The ever increasing need for processing large and highly detailed data sets fosters the development of efficient parameterization techniques that can capture the geometry of the input meshes and produce low distortion planar maps. We present a set of novel techniques allowing for low distortion parameterization. In particular, we address one of the major shortcomings of linear methods by allowing the parametric representation to evolve freely on the plane without any fixed boundary vertices. Our method consists of several simple steps, each solving a linear problem. Our results exhibit a fair balance between high-quality and computational efficiency.},
  BOOKTITLE = {Proceedings of the third Eurographics Symposium on Geometry Processing (SGP 2005)},
  EDITOR    = {Desbrun, Mathieu and Pottmann, Helmut},
  PAGES     = {91--100},
  ADDRESS   = {Vienna, Austria},
}
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Setting the Boundary Free: A Composite Approach to Surface Parameterization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-27AE-E %F EDOC: 278953 %F OTHER: Local-ID: C125675300671F7B-245A753A890F1A95C1257027004DC836-zayer:bf:2005 %R 10.2312/SGP/SGP05/091-100 %D 2005 %B Third Eurographics Symposium on Geometry Processing %Z date of event: 2005-07-04 - %C Vienna, Austria %X In the last decade, surface mesh parameterization has emerged as a standard <br>technique in computer graphics. The ever increasing need for processing large <br>and highly detailed data sets fosters the development of ef cient <br>parameterization techniques that can capture the geometry of the input meshes <br>and produce low distortion planar maps. We present a set of novel techniques <br>allowing for low distortion parameterization. In particular, we address one of <br>the major shortcomings of linear methods by allowing the parametric <br>representation to evolve freely on the plane without any fixed boundary <br>vertices. Our method consists of several simple steps, each solving a linear <br>problem. Our results exhibit a fair balance between high-quality and <br>computational ef ciency. %B Proceedings of the third Eurographics Symposium on Geometry Processing %E Desbrun, Mathieu; Pottmann, Helmut %P 91 - 100 %I Eurographics
Zayer, R., Rössl, C., and Seidel, H.-P. 2005d. Discrete Tensorial Quasi-harmonic Maps. Shape Modeling International 2005 (SMI 2005), IEEE.
Abstract
We introduce new linear operators for surface parameterization.<br> Given an initial mapping from the parametric plane onto a surface mesh, we<br> establish a secondary map of the plane onto itself that mimics the initial<br> one.<br> The resulting low-distortion parameterization is smooth as it stems from<br> solving a quasi-harmonic equation.<br> Our parameterization method is robust and independent of (the quality of) the<br> initial map.<br> In fact, for most cases the method converges from a simple projection on the<br> least squares plane even for complex models.
Export
BibTeX
@inproceedings{Zayer-et-al_SMI05,
  TITLE     = {Discrete Tensorial Quasi-harmonic Maps},
  AUTHOR    = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2379-X},
  DOI       = {10.1109/SMI.2005.17},
  LOCALID   = {Local-ID: C125675300671F7B-62611A9AA502D8EEC1256FB9004FF74E-ZayerSMI05},
  PUBLISHER = {IEEE},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We introduce new linear operators for surface parameterization. Given an initial mapping from the parametric plane onto a surface mesh, we establish a secondary map of the plane onto itself that mimics the initial one. The resulting low-distortion parameterization is smooth as it stems from solving a quasi-harmonic equation. Our parameterization method is robust and independent of (the quality of) the initial map. In fact, for most cases the method converges from a simple projection on the least squares plane even for complex models.},
  BOOKTITLE = {Shape Modeling International 2005 (SMI 2005)},
  EDITOR    = {Spagnuolo, Michaela and Pasko, Alexander and Belyaev, Alexander},
  PAGES     = {276--285},
  ADDRESS   = {Cambridge, MA, USA},
}
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %E Belyaev, Alexander %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Discrete Tensorial Quasi-harmonic Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2643-6 %F EDOC: 278951 %F OTHER: Local-ID: C125675300671F7B-62611A9AA502D8EEC1256FB9004FF74E-ZayerSMI05 %R 10.1109/SMI.2005.17 %D 2005 %B 2005 International Conference on Shape Modeling and Applications %Z date of event: 2005-06-13 - 2005-06-17 %C Cambridge, MA, USA %X We introduce new linear operators for surface parameterization.<br> Given an initial mapping from the parametric plane onto a surface mesh, we<br> establish a secondary map of the plane onto itself that mimics the initial<br> one.<br> The resulting low-distortion parameterization is smooth as it stems from<br> solving a quasi-harmonic equation.<br> Our parameterization method is robust and independent of (the quality of) the<br> initial map.<br> In fact, for most cases the methods converges from a simple projection on the<br> least squares plane even for complex models. %B Shape Modeling International 2005 (SMI 2005) %E Spagnuolo, Michaela; Pasko, Alexander; Belyaev, Alexander %P 276 - 285 %I IEEE %@ 0-7695-2379-X
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2005a. Fast and Robust Detection of Crest Lines on Meshes. Proceedings of the Ninth ACM Symposium on Solid and Physical Modeling 2005 (SPM 2005), ACM.
Abstract
We propose a fast and robust method for detecting<br>crest lines on surfaces approximated by dense triangle meshes.<br>The crest lines, salient surface features defined via first- and <br>second-order curvature derivatives, are widely used for shape <br>matching and interrogation purposes. Their practical extraction <br>is difficult because it requires good estimation of high-order<br>surface derivatives. Our approach to the crest line detection <br>is based on estimating the curvature tensor and curvature <br>derivatives via local polynomial fitting. <br>Since the crest lines are not defined in the surface regions<br>where the surface focal set (caustic) degenerates, we introduce<br>a new thresholding scheme which exploits interesting relationships <br>between curvature extrema, the so-called MVS functional of Moreton <br>and Sequin, and Dupin cyclides. <br>An application of the crest lines to adaptive mesh simplification<br>is also considered.
Export
BibTeX
@inproceedings{Yoshizawa-et-al_SPM05,
  TITLE     = {Fast and Robust Detection of Crest Lines on Meshes},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-59593-015-9},
  DOI       = {10.1145/1060244.1060270},
  LOCALID   = {Local-ID: C125675300671F7B-B689675F69641AFCC1256FC000660260-Yoshizawa_spm05},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We propose a fast and robust method for detecting crest lines on surfaces approximated by dense triangle meshes. The crest lines, salient surface features defined via first- and second-order curvature derivatives, are widely used for shape matching and interrogation purposes. Their practical extraction is difficult because it requires good estimation of high-order surface derivatives. Our approach to the crest line detection is based on estimating the curvature tensor and curvature derivatives via local polynomial fitting. Since the crest lines are not defined in the surface regions where the surface focal set (caustic) degenerates, we introduce a new thresholding scheme which exploits interesting relationships between curvature extrema, the so-called MVS functional of Moreton and Sequin, and Dupin cyclides. An application of the crest lines to adaptive mesh simplification is also considered.},
  BOOKTITLE = {Proceedings of the Ninth ACM Symposium on Solid and Physical Modeling 2005 (SPM 2005)},
  EDITOR    = {Kobbelt, Leif and Shapiro, Vadim},
  PAGES     = {227--232},
  ADDRESS   = {Cambridge, MA, USA},
}
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Robust Detection of Crest Lines on Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2686-E %F EDOC: 278956 %F OTHER: Local-ID: C125675300671F7B-B689675F69641AFCC1256FC000660260-Yoshizawa_spm05 %R 10.1145/1060244.1060270 %D 2005 %B The Ninth ACM Symposium on Solid and Physical Modeling 2005 %Z date of event: 2005-06-13 - 2005-06-15 %C Cambridge, MA, USA %X We propose a fast and robust method for detecting<br>crest lines on surfaces approximated by dense triangle meshes.<br>The crest lines, salient surface features defined via first- and <br>second-order curvature derivatives, are widely used for shape <br>matching and interrogation purposes. Their practical extraction <br>is difficult because it requires good estimation of high-order<br>surface derivatives. Our approach to the crest line detection <br>is based on estimating the curvature tensor and curvature <br>derivatives via local polynomial fitting. <br>Since the crest lines are not defined in the surface regions<br>where the surface focal set (caustic) degenerates, we introduce<br>a new thresholding scheme which exploits interesting relationships <br>between curvature extrema, the so-called MVS functional of Moreton <br>and Sequin, and Dupin cyclides, <br>An application of the crest lines to adaptive mesh simplification<br>is also considered. %B Proceedings of the Ninth ACM Symposium on Solid and Physical Modeling 2005 %E Kobbelt, Leif; Shapiro, Vadim %P 227 - 232 %I ACM %@ 1-59593-015-9
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2005b. A Moving Mesh Approach to Stretch-minimizing Mesh Parameterization. International Journal of Shape Modeling11, 1.
Abstract
We propose to use a moving mesh approach, a popular grid adaption<br>technique in computational mechanics, for fast generating <br>low-stretch mesh parameterizations. Given a triangle mesh approximating <br>a surface, we construct an initial parameterization of the mesh <br>and then improve the parameterization gradually. At each improvement step,<br>we optimize the parameterization generated at the previous step<br>by minimizing a weighted quadratic energy where the weights <br>are chosen in order to minimize the parameterization stretch.<br>This optimization procedure does not generate triangle <br>flips if the boundary of the parameter domain is a convex polygon. <br>Moreover already the first optimization step produces a high-quality mesh <br>parameterization. We compare our parameterization procedure with <br>several state-of-art mesh parameterization methods and demonstrate <br>its speed and high efficiency in parameterizing large and geometrically <br>complex models.
Export
BibTeX
@article{Yoshizawa-et-al_IJSM05,
  TITLE     = {A Moving Mesh Approach to Stretch-minimizing Mesh Parameterization},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0218-6543},
  DOI       = {10.1142/S0218654305000712},
  LOCALID   = {Local-ID: C125675300671F7B-64CF1DD79F60D2B7C125712E002CC3E8-YoshizawaIJSM2005},
  PUBLISHER = {World Scientific},
  ADDRESS   = {Singapore},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We propose to use a moving mesh approach, a popular grid adaption technique in computational mechanics, for fast generating low-stretch mesh parameterizations. Given a triangle mesh approximating a surface, we construct an initial parameterization of the mesh and then improve the parameterization gradually. At each improvement step, we optimize the parameterization generated at the previous step by minimizing a weighted quadratic energy where the weights are chosen in order to minimize the parameterization stretch. This optimization procedure does not generate triangle flips if the boundary of the parameter domain is a convex polygon. Moreover already the first optimization step produces a high-quality mesh parameterization. We compare our parameterization procedure with several state-of-the-art mesh parameterization methods and demonstrate its speed and high efficiency in parameterizing large and geometrically complex models.},
  JOURNAL   = {International Journal of Shape Modeling},
  VOLUME    = {11},
  NUMBER    = {1},
  PAGES     = {25--42},
}
Endnote
%0 Journal Article %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Moving Mesh Approach to Stretch-minimizing Mesh Parameterization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2597-0 %F EDOC: 278957 %F OTHER: Local-ID: C125675300671F7B-64CF1DD79F60D2B7C125712E002CC3E8-YoshizawaIJSM2005 %R 10.1142/S0218654305000712 %D 2005 %* Review method: peer-reviewed %X We propose to use a moving mesh approach, a popular grid adaption<br>technique in computational mechanics, for fast generating <br>low-stretch mesh parameterizations. Given a triangle mesh approximating <br>a surface, we construct an initial parameterization of the mesh <br>and then improve the parameterization gradually. At each improvement step,<br>we optimize the parameterization generated at the previous step<br>by minimizing a weighted quadratic energy where the weights <br>are chosen in order to minimize the parameterization stretch.<br>This optimization procedure does not generate triangle <br>flips if the boundary of the parameter domain is a convex polygon. <br>Moreover already the first optimization step produces a high-quality mesh <br>parameterization. We compare our parameterization procedure with <br>several state-of-art mesh parameterization methods and demonstrate <br>its speed and high efficiency in parameterizing large and geometrically <br>complex models. %J International Journal of Shape Modeling %V 11 %N 1 %& 25 %P 25 - 42 %I World Scientific %C Singapore %@ false
Yoshida, A., Blanz, V., Myszkowski, K., and Seidel, H.-P. 2005. Perceptual Evaluation of Tone Mapping Operators with Real-World Scenes. Human Vision and Electronic Imaging X, IS&T/SPIE’s 17th Annual Symposium on Electronic Imaging (2005), SPIE.
Abstract
A number of successful tone mapping operators for contrast compression have <br>been proposed due to the need<br>to visualize high dynamic range (HDR) images on low dynamic range devices. They <br>were inspired by fields<br>as diverse as image processing, photographic practice, and modeling of the <br>human visual systems (HVS). The<br>variety of approaches calls for a systematic perceptual evaluation of their <br>performance.<br>We conduct a psychophysical experiment based on a direct comparison between the <br>appearance of real-world<br>scenes and HDR images of these scenes displayed on a low dynamic range monitor. <br>In our experiment, HDR<br>images are tone mapped by seven existing tone mapping operators. The primary <br>interest of this psychophysical<br>experiment is to assess the differences in how tone mapped images are perceived <br>by human observers and to find<br>out which attributes of image appearance account for these differences when tone <br>mapped images are compared<br>directly with their corresponding real-world scenes rather than with each <br>other. The human subjects rate image<br>naturalness, overall contrast, overall brightness, and detail reproduction in <br>dark and bright image regions with<br>respect to the corresponding real-world scene.<br>The results indicate substantial differences in perception of images produced by <br>individual tone mapping<br>operators. We observe a clear distinction between global and local operators in <br>favor of the latter, and we<br>classify the tone mapping operators according to naturalness and appearance <br>attributes.
Export
BibTeX
@inproceedings{Yoshida-et-al_SPIE05,
  TITLE     = {Perceptual Evaluation of Tone Mapping Operators with Real-World Scenes},
  AUTHOR    = {Yoshida, Akiko and Blanz, Volker and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0277-786X},
  ISBN      = {978-0-8194-5639-7},
  DOI       = {10.1117/12.587782},
  LOCALID   = {Local-ID: C125675300671F7B-6BD5753531007D22C1256F5C006B5D8C-Yoshida2005},
  PUBLISHER = {SPIE},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual systems (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on a low dynamic range monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.},
  BOOKTITLE = {Human Vision and Electronic Imaging X, IS\&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005)},
  EDITOR    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.},
  PAGES     = {192--203},
  SERIES    = {SPIE Proceedings Series},
  VOLUME    = {5666},
  ADDRESS   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Blanz, Volker %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Evaluation of Tone Mapping Operators with Real-World Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2759-C %F EDOC: 278958 %F OTHER: Local-ID: C125675300671F7B-6BD5753531007D22C1256F5C006B5D8C-Yoshida2005 %R 10.1117/12.587782 %D 2005 %B IS&T/SPIE's 17th Annual Symposium on Electronic Imaging %Z date of event: 2005-01-17 - %C San Jose, CA, USA %X A number of successful tone mapping operators for contrast compression have <br>been proposed due to the need<br>to visualize high dynamic range (HDR) images on low dynamic range devices. They <br>were inspired by &#216;elds<br>as diverse as image processing, photographic practice, and modeling of the <br>human visual systems (HVS). The<br>variety of approaches calls for a systematic perceptual evaluation of their <br>performance.<br>We conduct a psychophysical experiment based on a direct comparison between the <br>appearance of real-world<br>scenes and HDR images of these scenes displayed on a low dynamic range monitor. <br>In our experiment, HDR<br>images are tone mapped by seven existing tone mapping operators. The primary <br>interest of this psychophysical<br>experiment is to assess the di&#198;erences in how tone mapped images are perceived <br>by human observers and to &#216;nd<br>out which attributes of image appearance account for these di&#198;erences when tone <br>mapped images are compared<br>directly with their corresponding real-world scenes rather than with each <br>other. 
The human subjects rate image<br>naturalness, overall contrast, overall brightness, and detail reproduction in <br>dark and bright image regions with<br>respect to the corresponding real-world scene.<br>The results indicate substantial di&#198;erences in perception of images produced by <br>individual tone mapping<br>operators. We observe a clear distinction between global and local operators in <br>favor of the latter, and we<br>classify the tone mapping operators according to naturalness and appearance <br>attributes.ce attributes. %B Human Vision and Electronic Imaging X, IS&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005) %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 192 - 203 %I SPIE %@ 978-0-8194-5639-7 %B SPIE Proceedings Series %N 5666 %@ false
Yamauchi, H., Lensch, H.P.A., Haber, J., and Seidel, H.-P. 2005a. Textures Revisited. The Visual Computer21.
Abstract
We describe texture generation methods for complex objects. Recent 3D<br> scanning devices and high-resolution cameras can<br> capture complex geometry of an object and provide high-resolution<br> images. However, generating a textured model from this input<br> data is still a difficult problem.<br><br> This task is divided into three sub-problems: parameterization, texture<br> combination, and texture restoration. A low distortion parameterization<br> method is presented, which minimizes geometry stretch<br> energy. Photographs of the object taken from multiple viewpoints under<br> modestly uncontrolled illumination conditions are merged into a seamless<br> texture by our new texture combination method.<br><br> We also demonstrate a texture restoration method which can fill in<br> missing pixel information when the input photographs do not provide<br> sufficient information to cover the entire surface due to<br> self-occlusion or registration errors.<br><br>Our methods are fully automatic except the registration between a 3D<br> model with input photographs. We demonstrate the application of our<br> method to human face models for evaluation. The techniques presented in<br> this paper make a consistent and complete pipeline to generate a<br> texture of a complex object.
Export
BibTeX
@article{Yamauchi-et-al_VC05,
  TITLE     = {Textures Revisited},
  AUTHOR    = {Yamauchi, Hitoshi and Lensch, Hendrik P. A. and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0178-2789},
  DOI       = {10.1007/s00371-005-0283-5},
  LOCALID   = {Local-ID: C125675300671F7B-AE01F1AE12A2232DC1256FB90052EA12-Yamauchi:TR},
  PUBLISHER = {Springer International},
  ADDRESS   = {Berlin},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We describe texture generation methods for complex objects. Recent 3D scanning devices and high-resolution cameras can capture complex geometry of an object and provide high-resolution images. However, generating a textured model from this input data is still a difficult problem. This task is divided into three sub-problems: parameterization, texture combination, and texture restoration. A low distortion parameterization method is presented, which minimizes geometry stretch energy. Photographs of the object taken from multiple viewpoints under modestly uncontrolled illumination conditions are merged into a seamless texture by our new texture combination method. We also demonstrate a texture restoration method which can fill in missing pixel information when the input photographs do not provide sufficient information to cover the entire surface due to self-occlusion or registration errors. Our methods are fully automatic except the registration between a 3D model and input photographs. We demonstrate the application of our method to human face models for evaluation. The techniques presented in this paper make a consistent and complete pipeline to generate a texture of a complex object.},
  JOURNAL   = {The Visual Computer},
  VOLUME    = {21},
  PAGES     = {217--241},
}
Endnote
%0 Journal Article %A Yamauchi, Hitoshi %A Lensch, Hendrik P. A. %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Textures Revisited : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-27DF-2 %F EDOC: 278959 %F OTHER: Local-ID: C125675300671F7B-AE01F1AE12A2232DC1256FB90052EA12-Yamauchi:TR %R 10.1007/s00371-005-0283-5 %D 2005 %* Review method: peer-reviewed %X We describe texture generation methods for complex objects. Recent 3D<br> scanning devices and high-resolution cameras can<br> capture complex geometry of an object and provide high-resolution<br> images. However, generating a textured model from this input<br> data is still a difficult problem.<br><br> This task is divided into three sub-problems: parameterization, texture<br> combination, and texture restoration. A low distortion parameterization<br> method is presented, which minimizes geometry stretch<br> energy. Photographs of the object taken from multiple viewpoints under<br> modestly uncontrolled illumination conditions are merged into a seamless<br> texture by our new texture combination method.<br><br> We also demonstrate a texture restoration method which can fill in<br> missing pixel information when the input photographs do not provide<br> sufficient information to cover the entire surface due to<br> self-occlusion or registration errors.<br><br>Our methods are fully automatic except the registration between a 3D<br> model with input photographs. We demonstrate the application of our<br> method to human face models for evaluation. The techniques presented in<br> this paper make a consistent and complete pipeline to generate a<br> texture of a complex object. 
%J The Visual Computer %V 21 %& 217 %P 217 - 241 %I Springer International %C Berlin %@ false %U https://rdcu.be/dHi6F
Yamauchi, H., Lee, S., Lee, Y., Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2005b. Feature Sensitive Mesh Segmentation with Mean Shift. Shape Modeling International 2005 (SMI 2005), IEEE.
Abstract
Feature sensitive mesh segmentation is important for many<br>computer graphics and geometric modeling applications. In this<br>paper, we develop a mesh segmentation method which is capable of<br>producing high-quality shape partitioning. It respects fine shape<br>features and works well on various types of shapes, including<br>natural shapes and mechanical parts.<br>The method combines a procedure for clustering mesh normals<br>with a modification of the mesh chartification technique \cite{Sander_sig03}.<br>For clustering of mesh normals, we adapt Mean Shift,<br>a powerful general purpose technique for clustering scattered data.<br>We demonstrate advantages of our method by comparing it with two<br>state-of-the-art mesh segmentation techniques.
Export
BibTeX
@inproceedings{Yamauchi-et-al_SMI05,
  TITLE     = {Feature Sensitive Mesh Segmentation with {Mean Shift}},
  AUTHOR    = {Yamauchi, Hitoshi and Lee, Seungyong and Lee, Yunjin and Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2379-X},
  DOI       = {10.1109/SMI.2005.21},
  LOCALID   = {Local-ID: C125675300671F7B-40645AA425C269E4C1256FC60056CA4F-Yamauchi:FSMS:2005},
  PUBLISHER = {IEEE},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {Feature sensitive mesh segmentation is important for many computer graphics and geometric modeling applications. In this paper, we develop a mesh segmentation method which is capable of producing high-quality shape partitioning. It respects fine shape features and works well on various types of shapes, including natural shapes and mechanical parts. The method combines a procedure for clustering mesh normals with a modification of the mesh chartification technique of Sander et al. (2003). For clustering of mesh normals, we adapt Mean Shift, a powerful general purpose technique for clustering scattered data. We demonstrate advantages of our method by comparing it with two state-of-the-art mesh segmentation techniques.},
  BOOKTITLE = {Shape Modeling International 2005 (SMI 2005)},
  EDITOR    = {Spagnuolo, Michaela and Pasko, Alexander and Belyaev, Alexander},
  PAGES     = {236--243},
  ADDRESS   = {Cambridge, MA, USA},
}
Endnote
%0 Conference Proceedings %A Yamauchi, Hitoshi %A Lee, Seungyong %A Lee, Yunjin %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Sensitive Mesh Segmentation with Mean Shift : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2693-0 %F EDOC: 278961 %F OTHER: Local-ID: C125675300671F7B-40645AA425C269E4C1256FC60056CA4F-Yamauchi:FSMS:2005 %R 10.1109/SMI.2005.21 %D 2005 %B 2005 International Conference on Shape Modeling and Applications %Z date of event: 2005-06-13 - 2005-06-17 %C Cambridge, MA, USA %X Feature sensitive mesh segmentation is important for many<br>computer graphics and geometric modeling applications. In this<br>paper, we develop a mesh segmentation method which is capable of<br>producing high-quality shape partitioning. It respects fine shape<br>features and works well on various types of shapes, including<br>natural shapes and mechanical parts.<br>The method combines a procedure for clustering mesh normals<br>with a modification of the mesh chartification technique \cite{Sander_sig03}.<br>For clustering of mesh normals, we adapt Mean Shift,<br>a powerful general purpose technique for clustering scattered data.<br>We demonstrate advantages of our method by comparing it with two<br>state-of-the-art mesh segmentation techniques. %B Shape Modeling International 2005 %E Spagnuolo, Michaela; Pasko, Alexander; Belyaev, Alexander %P 236 - 243 %I IEEE %@ 0-7695-2379-X
Yamauchi, H., Gumhold, S., Zayer, R., and Seidel, H.-P. 2005c. Mesh Segmentation Driven by Gaussian Curvature. The Visual Computer21.
Abstract
Mesh parameterization is a fundamental problem in computer graphics as<br> it allows for texture mapping and facilitates a lot of mesh processing<br> tasks. Although there exists a variety of good parameterization methods<br> for meshes that are topologically equivalent to a disc, the<br> segmentation into nicely parameterizable charts of higher genus meshes<br> has been studied less. In this paper we propose a new segmentation<br> method for the generation of charts that can be flattened<br> efficiently. The integrated Gaussian curvature is used to measure the<br> developability of a chart and a robust and simple scheme<br> is proposed to integrate the Gaussian curvature. The segmentation<br> approach evenly distributes Gaussian curvature over the charts and<br> automatically ensures disc-like topology of each chart. For numerical<br> stability, we use area on the Gauss map to represent Gaussian<br> curvature. Resulting parameterization shows that charts generated in<br> this way have less distortion compared to charts generated by other<br> methods.
Export
BibTeX
@article{Yamauchi-et-al_VG05, TITLE = {Mesh Segmentation Driven by Gaussian Curvature}, AUTHOR = {Yamauchi, Hitoshi and Gumhold, Stefan and Zayer, Rhaleb and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0178-2789}, DOI = {10.1007/s00371-005-0319-x}, LOCALID = {Local-ID: C125675300671F7B-CE914B4A765CDD20C125709D0053CAFE-YamauchiMSDGC2005}, PUBLISHER = {Springer International}, ADDRESS = {Berlin}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {Mesh parameterization is a fundamental problem in computer graphics as<br> it allows for texture mapping and facilitates a lot of mesh processing<br> tasks. Although there exists a variety of good parameterization methods<br> for meshes that are topologically equivalent to a disc, the<br> segmentation into nicely parameterizable charts of higher genus meshes<br> has been studied less. In this paper we propose a new segmentation<br> method for the generation of charts that can be flattened<br> efficiently. The integrated Gaussian curvature is used to measure the<br><br> developability of a chart and a robust and simple scheme<br> is proposed to integrate the Gaussian curvature. The segmentation<br> approach evenly distributes Gaussian curvature over the charts and<br> automatically ensures disc-like topology of each chart. For numerical<br> stability, we use area on the Gauss map to represent Gaussian<br> curvature. Resulting parameterization shows that charts generated in<br> this way have less distortion compared to charts generated by other<br> methods.}, JOURNAL = {The Visual Computer}, VOLUME = {21}, PAGES = {659--668}, }
Endnote
%0 Journal Article %A Yamauchi, Hitoshi %A Gumhold, Stefan %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mesh Segmentation Driven by Gaussian Curvature : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-270A-F %F EDOC: 278962 %F OTHER: Local-ID: C125675300671F7B-CE914B4A765CDD20C125709D0053CAFE-YamauchiMSDGC2005 %R 10.1007/s00371-005-0319-x %D 2005 %* Review method: peer-reviewed %X Mesh parameterization is a fundamental problem in computer graphics as<br> it allows for texture mapping and facilitates a lot of mesh processing<br> tasks. Although there exists a variety of good parameterization methods<br> for meshes that are topologically equivalent to a disc, the<br> segmentation into nicely parameterizable charts of higher genus meshes<br> has been studied less. In this paper we propose a new segmentation<br> method for the generation of charts that can be flattened<br> efficiently. The integrated Gaussian curvature is used to measure the<br><br> developability of a chart and a robust and simple scheme<br> is proposed to integrate the Gaussian curvature. The segmentation<br> approach evenly distributes Gaussian curvature over the charts and<br> automatically ensures disc-like topology of each chart. For numerical<br> stability, we use area on the Gauss map to represent Gaussian<br> curvature. Resulting parameterization shows that charts generated in<br> this way have less distortion compared to charts generated by other<br> methods. %J The Visual Computer %V 21 %& 659 %P 659 - 668 %I Springer International %C Berlin %@ false %U https://rdcu.be/dHi2V
Weinkauf, T., Theisel, H., Shi, K., Hege, H.-C., and Seidel, H.-P. 2005. Extracting Higher Order Critical Points and Topological Simplification of 3D Vector Fields. IEEE Visualization 2005, IEEE.
Export
BibTeX
@inproceedings{Weinkauf-et-al_VIS05, TITLE = {Extracting Higher Order Critical Points and Topological Simplification of {3D} Vector Fields}, AUTHOR = {Weinkauf, Tino and Theisel, Holger and Shi, Kuangyu and Hege, Hans-Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-9462-3}, DOI = {10.1109/VISUAL.2005.1532842}, LOCALID = {Local-ID: C125675300671F7B-C6C2757EF16879D8C1257050003CF30A-Weinkauf05}, PUBLISHER = {IEEE}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {IEEE Visualization 2005}, EDITOR = {Silva, Cl{\'a}udio T. and Gr{\"o}ller, Eduard and Rushmeier, Holly}, PAGES = {559--566}, ADDRESS = {Minneapolis, MN, USA}, }
Endnote
%0 Conference Proceedings %A Weinkauf, Tino %A Theisel, Holger %A Shi, Kuangyu %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Extracting Higher Order Critical Points and Topological Simplification of 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-267D-4 %F EDOC: 278965 %F OTHER: Local-ID: C125675300671F7B-C6C2757EF16879D8C1257050003CF30A-Weinkauf05 %R 10.1109/VISUAL.2005.1532842 %D 2005 %B 2005 IEEE Conference on Visualization %Z date of event: 2005-10-23 - 2005-10-28 %C Minneapolis, MN, USA %B IEEE Visualization 2005 %E Silva, Cl&#225;udio T.; Gr&#246;ller, Eduard; Rushmeier, Holly %P 559 - 566 %I IEEE %@ 0-7803-9462-3
Wang, Y. and Seidel, H.-P. 2005. Visualization and Interaction of Autostereogram. Vision, Modeling, and Visualization 2005 (VMV 2005), Akademische Verlagsgesellschaft Aka.
Abstract
In this work we propose a novel approach for realistic fire animation and manipulation. We apply a statistical learning method to an image sequence of a real-world flame to jointly capture flame motion and appearance characteristics. A low-dimensional generic flame model is then robustly matched to the video images. The model parameter values are used as input to drive an Expectation-Maximization algorithm to learn an {\em auto regressive process} with respect to flame dynamics. The generic flame model and the trained motion model enable us to synthesize new, unique flame sequences of arbitrary length in real-time.
Export
BibTeX
@inproceedings{Wang_VMV05, TITLE = {Visualization and Interaction of Autostereogram}, AUTHOR = {Wang, Yan and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-89838-068-8}, PUBLISHER = {Akademische Verlagsgesellschaft Aka}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {In this work we propose a novel approach for realistic fire animation and manipulation. We apply a statistical learning method to an image sequence of a real-world flame to jointly capture flame motion and appearance characteristics. A low-dimensional generic flame model is then robustly matched to the video images. The model parameter values are used as input to drive an Expectation-Maximization algorithm to learn an {\em auto regressive process} with respect to flame dynamics. The generic flame model and the trained motion model enable us to synthesize new, unique flame sequences of arbitrary length in real-time.}, BOOKTITLE = {Vision, Modeling, and Visualization 2005 (VMV 2005)}, EDITOR = {Greiner, G{\"u}nther and Hornegger, Joachim and Niemann, Heinrich and Stamminger, Marc}, PAGES = {327--334}, ADDRESS = {Erlangen, Germany}, }
Endnote
%0 Conference Proceedings %A Wang, Yan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visualization and Interaction of Autostereogram : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E700-6 %D 2005 %B Vision, Modeling, and Visualization 2005 %Z date of event: 2005-11-16 - 2005-11-18 %C Erlangen, Germany %X In this work we propose a novel approach for realistic fire animation and manipulation. We apply a statistical learning method to an image sequence of a real-world flame to jointly capture flame motion and appearance characteristics. A low-dimensional generic flame model is then robustly matched to the video images. The model parameter values are used as input to drive an Expectation-Maximization algorithm to learn an {\em auto regressive process} with respect to flame dynamics. The generic flame model and the trained motion model enable us to synthesize new, unique flame sequences of arbitrary length in real-time. %B Vision, Modeling, and Visualization 2005 %E Greiner, G&#252;nther; Hornegger, Joachim; Niemann, Heinrich; Stamminger, Marc %P 327 - 334 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-068-8
Wald, I., Friedrich, H., Marmitt, G., Slusallek, P., and Seidel, H.-P. 2005. Faster Isosurface Ray Tracing Using Implicit KD-Trees. IEEE Transactions on Visualization and Computer Graphics 11, 5.
Export
BibTeX
@article{wald:05:IsoSurfaceRT, TITLE = {Faster Isosurface Ray Tracing Using Implicit {KD}-Trees}, AUTHOR = {Wald, Ingo and Friedrich, Heiko and Marmitt, Gerd and Slusallek, Philipp and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2005.79}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2005}, DATE = {2005}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {11}, NUMBER = {5}, PAGES = {562--572}, }
Endnote
%0 Journal Article %A Wald, Ingo %A Friedrich, Heiko %A Marmitt, Gerd %A Slusallek, Philipp %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Faster Isosurface Ray Tracing Using Implicit KD-Trees : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E76A-B %R 10.1109/TVCG.2005.79 %7 2005 %D 2005 %J IEEE Transactions on Visualization and Computer Graphics %V 11 %N 5 %& 562 %P 562 - 572 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Wald, I. and Seidel, H.-P. 2005a. Interactive Ray Tracing of Point-based Models. Symposium on Point-based Graphics 05 (SPBG 2005), Eurographics Association.
Export
BibTeX
@inproceedings{Wald-Seidel_SPBG05, TITLE = {Interactive Ray Tracing of Point-based Models}, AUTHOR = {Wald, Ingo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-905673-20-7}, DOI = {10.2312/SPBG/SPBG05/009-016}, PUBLISHER = {Eurographics Association}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {Symposium on Point-based Graphics 05 (SPBG 2005)}, EDITOR = {Fellner, Dieter and M{\"o}ller, Torsten and Spencer, Stephen}, PAGES = {9--16}, ADDRESS = {Stony Brook, NY, USA}, }
Endnote
%0 Conference Proceedings %A Wald, Ingo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Ray Tracing of Point-based Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E763-A %R 10.2312/SPBG/SPBG05/009-016 %D 2005 %B Second Eurographics / IEEE VGTC Conference on Point-Based Graphics %Z date of event: 2005-06-21 - 2005-06-22 %C Stony Brook, NY, USA %B Symposium on Point-based Graphics 05 %E Fellner, Dieter; M&#246;ller, Torsten; Spencer, Stephen %P 9 - 16 %I Eurographics Association %@ 3-905673-20-7
Wald, I. and Seidel, H.-P. 2005b. Interactive Ray Tracing of Point-based Models. SIGGRAPH ’05: ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/WaldS05, TITLE = {Interactive Ray Tracing of Point-based Models}, AUTHOR = {Wald, Ingo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7827-7}, DOI = {10.1145/1187112.1187176}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches}, EDITOR = {Buhler, Juan}, PAGES = {54}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Wald, Ingo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Ray Tracing of Point-based Models : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3D14-7 %R 10.1145/1187112.1187176 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 54 %I ACM %@ 978-1-4503-7827-7
Theobalt, C., Ahmed, N., de Aguiar, E., et al. 2005a. Joint Motion and Reflectance Capture for Relightable 3D Video. SIGGRAPH ’05: ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/TheobaltAAZLMS05, TITLE = {Joint Motion and Reflectance Capture for Relightable {3D} Video}, AUTHOR = {Theobalt, Christian and Ahmed, Naveed and de Aguiar, Edilson and Ziegler, Gernot and Lensch, Hendrik P. A. and Magnor, Marcus A. and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7827-7}, DOI = {10.1145/1187112.1187200}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches}, EDITOR = {Buhler, Juan}, PAGES = {73}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Ahmed, Naveed %A de Aguiar, Edilson %A Ziegler, Gernot %A Lensch, Hendrik P. A. %A Magnor, Marcus A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Joint Motion and Reflectance Capture for Relightable 3D Video : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3CF9-6 %R 10.1145/1187112.1187200 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 73 %I ACM %@ 978-1-4503-7827-7
Theobalt, C., Ahmed, N., de Aguiar, E., et al. 2005b. Joint Motion and Reflectance Capture for Creating Relightable 3D Videos. Research Report MPI-I-2005-4-004.
Abstract
Passive optical motion capture is able to provide authentically animated, photo-realistically and view-dependently textured models of real people. To import real-world characters into virtual environments, however, also surface reflectance properties must be known. We describe a video-based modeling approach that captures human motion as well as reflectance characteristics from a handful of synchronized video recordings. The presented method is able to recover spatially varying reflectance properties of clothes by exploiting the time-varying orientation of each surface point with respect to camera and light direction. The resulting model description enables us to match animated subject appearance to different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. Our contribution allows populating virtual worlds with correctly relit, real-world people.
Export
BibTeX
@techreport{TheobaltTR2005, TITLE = {Joint Motion and Reflectance Capture for Creating Relightable {3D} Videos}, AUTHOR = {Theobalt, Christian and Ahmed, Naveed and de Aguiar, Edilson and Ziegler, Gernot and Lensch, Hendrik and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, NUMBER = {MPI-I-2005-4-004}, LOCALID = {Local-ID: C1256BDE005F57A8-5B757D3AA9584EEBC12570A7003C813D-TheobaltTR2005}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {\begin{abstract} Passive optical motion capture is able to provide authentically animated, photo-realistically and view-dependently textured models of real people. To import real-world characters into virtual environments, however, also surface reflectance properties must be known. We describe a video-based modeling approach that captures human motion as well as reflectance characteristics from a handful of synchronized video recordings. The presented method is able to recover spatially varying reflectance properties of clothes % dynamic objects ? by exploiting the time-varying orientation of each surface point with respect to camera and light direction. The resulting model description enables us to match animated subject appearance to different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. Our contribution allows populating virtual worlds with correctly relit, real-world people.\\ \end{abstract}}, }
Endnote
%0 Report %A Theobalt, Christian %A Ahmed, Naveed %A de Aguiar, Edilson %A Ziegler, Gernot %A Lensch, Hendrik %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Joint Motion and Reflectance Capture for Creating Relightable 3D Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2879-B %F EDOC: 520731 %F OTHER: Local-ID: C1256BDE005F57A8-5B757D3AA9584EEBC12570A7003C813D-TheobaltTR2005 %D 2005 %X \begin{abstract} Passive optical motion capture is able to provide authentically animated, photo-realistically and view-dependently textured models of real people. To import real-world characters into virtual environments, however, also surface reflectance properties must be known. We describe a video-based modeling approach that captures human motion as well as reflectance characteristics from a handful of synchronized video recordings. The presented method is able to recover spatially varying reflectance properties of clothes % dynamic objects ? by exploiting the time-varying orientation of each surface point with respect to camera and light direction. The resulting model description enables us to match animated subject appearance to different lighting conditions, as well as to interchange surface attributes among different people, e.g. for virtual dressing. Our contribution allows populating virtual worlds with correctly relit, real-world people.\\ \end{abstract}
Theobalt, C., Magnor, M., and Seidel, H.-P. 2005c. 3D Image Analysis and Synthesis at MPI Informatik. Vision, Video, and Graphics 2005 (VVG 2005), The Eurographics Association.
Export
BibTeX
@inproceedings{Theobalt-et-al_VVG05, TITLE = {{3D} Image Analysis and Synthesis at {MPI} Informatik}, AUTHOR = {Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-905673-57-6}, DOI = {10.2312/vvg.20051011}, LOCALID = {Local-ID: C1256BDE005F57A8-B31A222C14C252B0C12570B4003754CD-Theobald05:3DIAS}, PUBLISHER = {The Eurographics Association}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {Vision, Video, and Graphics 2005 (VVG 2005)}, EDITOR = {Trucco, E. and Chantler, M.}, PAGES = {85--92}, ADDRESS = {Edinburgh, UK}, }
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Image Analysis and Synthesis at MPI Informatik : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-286D-7 %F EDOC: 520706 %F OTHER: Local-ID: C1256BDE005F57A8-B31A222C14C252B0C12570B4003754CD-Theobald05:3DIAS %R 10.2312/vvg.20051011 %D 2005 %B Vision, Video, and Graphics 2005 %Z date of event: 2005-07-07 - 2005-07-08 %C Edinburgh, UK %B Vision, Video, and Graphics 2005 %E Trucco, E.; Chantler, M. %P 85 - 92 %I The Eurographics Association %@ 3-905673-57-6
Theisel, H., Sahner, J., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2005a. Extraction of Parallel Vector Surfaces in 3D Time-dependent Fields and Applications to Vortex Core Line Tracking. IEEE Visualization 2005, IEEE.
Export
BibTeX
@inproceedings{Theisel-et-al_VIS05, TITLE = {Extraction of Parallel Vector Surfaces in {3D} Time-dependent Fields and Applications to Vortex Core Line Tracking}, AUTHOR = {Theisel, Holger and Sahner, Jan and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-9462-3}, DOI = {10.1109/VISUAL.2005.1532851}, LOCALID = {Local-ID: C125675300671F7B-A60B02A24F4D8A3FC1257050003F02F0-Theisel05}, PUBLISHER = {IEEE}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {IEEE Visualization 2005}, EDITOR = {Silva, Cl{\'a}udio T. and Gr{\"o}ller, Eduard and Rushmeier, Holly}, PAGES = {631--638}, ADDRESS = {Minneapolis, MN, USA}, }
Endnote
%0 Conference Proceedings %A Theisel, Holger %A Sahner, Jan %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Extraction of Parallel Vector Surfaces in 3D Time-dependent Fields and Applications to Vortex Core Line Tracking : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2680-9 %F EDOC: 278969 %F OTHER: Local-ID: C125675300671F7B-A60B02A24F4D8A3FC1257050003F02F0-Theisel05 %R 10.1109/VISUAL.2005.1532851 %D 2005 %B 2005 IEEE Conference on Visualization %Z date of event: 2005-10-23 - 2005-10-28 %C Minneapolis, MN, USA %B IEEE Visualization 2005 %E Silva, Cl&#225;udio T.; Gr&#246;ller, Eduard; Rushmeier, Holly %P 631 - 638 %I IEEE %@ 0-7803-9462-3
Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2005b. Topological Methods for 2D Time-Dependent Vector Fields Based on Stream Lines and Path Lines. IEEE Transactions on Visualization and Computer Graphics 11, 4.
Export
BibTeX
@article{Theisel-et-al_VCG05, TITLE = {Topological Methods for {2D} Time-Dependent Vector Fields Based on Stream Lines and Path Lines}, AUTHOR = {Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2005.68}, LOCALID = {Local-ID: C125675300671F7B-6B3196CF3A2F552DC1256FB6005A5F0B-Theisel2005}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2005}, DATE = {2005}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {11}, NUMBER = {4}, PAGES = {383--394}, }
Endnote
%0 Journal Article %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Topological Methods for 2D Time-Dependent Vector Fields Based on Stream Lines and Path Lines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-280E-0 %F EDOC: 278968 %F OTHER: Local-ID: C125675300671F7B-6B3196CF3A2F552DC1256FB6005A5F0B-Theisel2005 %R 10.1109/TVCG.2005.68 %D 2005 %* Review method: peer-reviewed %J IEEE Transactions on Visualization and Computer Graphics %V 11 %N 4 %& 383 %P 383 - 394 %I IEEE Computer Society %C New York, NY %@ false
Tarini, M., Lensch, H.P.A., Goesele, M., and Seidel, H.-P. 2005. 3D Acquisition of Mirroring Objects Using Striped Patterns. Graphical Models 67, 4.
Abstract
Objects with mirroring optical characteristics are left out of the scope of most 3D scanning methods. We present here a new automatic acquisition approach, shape-from-distortion, that focuses on that category of objects, requires only a still camera and a color monitor, and produces range scans (plus a normal and a reflectance map) of the target. Our technique consists of two steps: first, an improved environment matte is captured for the mirroring object, using the interference of patterns with different frequencies in order to obtain sub-pixel accuracy. Then, the matte is converted into a normal and a depth map by exploiting the self coherence of a surface when integrating the normal map along different paths. The results show very high accuracy, capturing even smallest surface details. The acquired depth maps can be further processed using standard techniques to produce a complete 3D mesh of the object.
Export
BibTeX
@article{Tarini-et-al_GM05, TITLE = {{3D} Acquisition of Mirroring Objects Using Striped Patterns}, AUTHOR = {Tarini, Marco and Lensch, Hendrik P. A. and Goesele, Michael and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1524-0703}, DOI = {10.1016/j.gmod.2004.11.002}, LOCALID = {Local-ID: C125675300671F7B-994B5B71542B2F8BC1256EC40062AEB8-Tarini:2004:AMO}, PUBLISHER = {Academic Press}, ADDRESS = {San Diego, Calif.}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {Objects with mirroring optical characteristics are left out of the<br>scope of most 3D scanning methods. We present here a new automatic<br>acquisition approach, shape-from-distortion, that focuses on that<br>category of objects, requires only a still camera and a color monitor, and<br>produces range scans (plus a normal and a reflectance map) of the<br>target.<br> <br>Our technique consists of two steps: first, an improved<br>environment matte is captured for the mirroring object, using the<br>interference of patterns with different frequencies in order to<br>obtain sub-pixel accuracy. Then, the matte is converted into a<br>normal and a depth map by exploiting the self coherence of a<br>surface when integrating the normal map along different paths.<br> <br>The results show very high accuracy, capturing even smallest<br>surface details. The acquired depth maps can be further processed<br>using standard techniques to produce a complete 3D mesh of the<br>object.}, JOURNAL = {Graphical Models}, VOLUME = {67}, NUMBER = {4}, PAGES = {233--259}, }
Endnote
%0 Journal Article %A Tarini, Marco %A Lensch, Hendrik P. A. %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Acquisition of Mirroring Objects Using Striped Patterns : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-257C-0 %F EDOC: 278970 %F OTHER: Local-ID: C125675300671F7B-994B5B71542B2F8BC1256EC40062AEB8-Tarini:2004:AMO %R 10.1016/j.gmod.2004.11.002 %D 2005 %* Review method: peer-reviewed %X Objects with mirroring optical characteristics are left out of the<br>scope of most 3D scanning methods. We present here a new automatic<br>acquisition approach, shape-from-distortion, that focuses on that<br>category of objects, requires only a still camera and a color monitor, and<br>produces range scans (plus a normal and a reflectance map) of the<br>target.<br> <br>Our technique consists of two steps: first, an improved<br>environment matte is captured for the mirroring object, using the<br>interference of patterns with different frequencies in order to<br>obtain sub-pixel accuracy. Then, the matte is converted into a<br>normal and a depth map by exploiting the self coherence of a<br>surface when integrating the normal map along different paths.<br> <br>The results show very high accuracy, capturing even smallest<br>surface details. The acquired depth maps can be further processed<br>using standard techniques to produce a complete 3D mesh of the<br>object. %J Graphical Models %V 67 %N 4 %& 233 %P 233 - 259 %I Academic Press %C San Diego, Calif. %@ false
Stoll, C., Gumhold, S., and Seidel, H.-P. 2005. Visualization with Stylized Line Primitives. IEEE Visualization 2005, IEEE.
Abstract
Line primitives are a very powerful visual attribute used for scientific visualization and in particular for 3D vector-field visualization. We extend the basic line primitives with additional visual attributes including color, line width, texture and orientation. To implement the visual attributes we represent the stylized line primitives as generalized cylinders. One important contribution of our work is an efficient rendering algorithm for stylized lines, which is hybrid in the sense that it uses both CPU and GPU based rendering. We improve the depth perception with a shadow algorithm. We present several applications for the visualization with stylized lines among which are the visualizations of 3D vector fields and molecular structures.
Export
BibTeX
@inproceedings{Stoll-et-al_VIS05, TITLE = {Visualization with Stylized Line Primitives}, AUTHOR = {Stoll, Carsten and Gumhold, Stefan and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-9462-3}, DOI = {10.1109/VISUAL.2005.1532859}, LOCALID = {Local-ID: C125675300671F7B-AE8C7954D1742C9AC12570D9004CD75A-Stoll2004a}, PUBLISHER = {IEEE}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {Line primitives are a very powerful visual attribute used for scientific<br>visualization and in particular for 3D vector-field visualization.<br>We extend the basic line primitives with additional visual attributes<br>including color, line width, texture and orientation. To implement<br>the visual attributes we represent the stylized line primitives as generalized<br>cylinders. One important contribution of our work is an efficient<br>rendering algorithm for stylized lines, which is hybrid in the<br>sense that it uses both CPU and GPU based rendering. We improve<br>the depth perception with a shadow algorithm. We present several<br>applications for the visualization with stylized lines among which<br>are the visualizations of 3D vector fields and molecular structures.}, BOOKTITLE = {IEEE Visualization 2005}, EDITOR = {Silva, Cl{\'a}udio T. and Gr{\"o}ller, Eduard and Rushmeier, Holly}, PAGES = {695--702}, ADDRESS = {Minneapolis, MN, USA}, }
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Gumhold, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visualization with Stylized Line Primitives : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-283A-A %F EDOC: 278972 %F OTHER: Local-ID: C125675300671F7B-AE8C7954D1742C9AC12570D9004CD75A-Stoll2004a %R 10.1109/VISUAL.2005.1532859 %D 2005 %B 2005 IEEE Conference on Visualization %Z date of event: 2005-10-23 - 2005-10-28 %C Minneapolis, MN, USA %X Line primitives are a very powerful visual attribute used for scientific<br>visualization and in particular for 3D vector-field visualization.<br>We extend the basic line primitives with additional visual attributes<br>including color, line width, texture and orientation. To implement<br>the visual attributes we represent the stylized line primitives as generalized<br>cylinders. One important contribution of our work is an efficient<br>rendering algorithm for stylized lines, which is hybrid in the<br>sense that it uses both CPU and GPU based rendering. We improve<br>the depth perception with a shadow algorithm. We present several<br>applications for the visualization with stylized lines among which<br>are the visualizations of 3D vector fields and molecular structures. %B IEEE Visualization 2005 %E Silva, Cl&#225;udio T.; Gr&#246;ller, Eduard; Rushmeier, Holly %P 695 - 702 %I IEEE %@ 0-7803-9462-3
Seidel, H.-P. 2005. Computer Graphics - More than Beautiful Images. The Visual Computer 21.
Export
BibTeX
@article{DBLP:journals/vc/Seidel05,
  TITLE     = {Computer Graphics -- More than Beautiful Images},
  AUTHOR    = {Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0178-2789},
  DOI       = {10.1007/s00371-005-0348-5},
  PUBLISHER = {Springer International},
  ADDRESS   = {Berlin},
  YEAR      = {2005},
  DATE      = {2005},
  JOURNAL   = {The Visual Computer},
  VOLUME    = {21},
  PAGES     = {520--521},
}
Endnote
%0 Journal Article %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Computer Graphics - More than Beautiful Images : %G eng %U http://hdl.handle.net/21.11116/0000-000F-42F9-E %R 10.1007/S00371-005-0348-5 %D 2005 %J The Visual Computer %V 21 %& 520 %P 520 - 521 %I Springer International %C Berlin %@ false %U https://rdcu.be/dHi5S
Schlosser, G., Hesser, J., Zeilfelder, F., et al. 2005. Fast Visualization by Shear-warp on Quadratic Super-spline Models Using Wavelet Data Decompositions. 2005 IEEE Visualization Conference, IEEE.
Export
BibTeX
@inproceedings{Schlosser-et-al_VIS05,
  TITLE     = {Fast Visualization by Shear-warp on Quadratic Super-spline Models Using Wavelet Data Decompositions},
  AUTHOR    = {Schlosser, Gregor and Hesser, J{\"u}rgen and Zeilfelder, Frank and R{\"o}ssl, Christian and M{\"a}nner, Reinhard and N{\"u}rnberger, G{\"u}nther and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7803-9462-3},
  DOI       = {10.1109/VISUAL.2005.1532816},
  LOCALID   = {Local-ID: C125675300671F7B-E47AEEFDA1D64044C1257027004E68DD-schlosser:swqss:2005},
  PUBLISHER = {IEEE},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {2005 IEEE Visualization Conference},
  EDITOR    = {Silva, Cl{\'a}udio T. and Gr{\"o}ller, Eduard and Rushmeier, Holly},
  PAGES     = {351--358},
  ADDRESS   = {Minneapolis, MN, USA},
}
Endnote
%0 Conference Proceedings %A Schlosser, Gregor %A Hesser, J&#252;rgen %A Zeilfelder, Frank %A R&#246;ssl, Christian %A M&#228;nner, Reinhard %A N&#252;rnberger, G&#252;nther %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Visualization by Shear-warp on Quadratic Super-spline Models Using Wavelet Data Decompositions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2690-5 %F EDOC: 278976 %F OTHER: Local-ID: C125675300671F7B-E47AEEFDA1D64044C1257027004E68DD-schlosser:swqss:2005 %R 10.1109/VISUAL.2005.1532816 %D 2005 %B 2005 IEEE Conference on Visualization %Z date of event: 2005-10-23 - 2005-10-28 %C Minneapolis, MN, USA %B 2005 IEEE Visualization Conference %E Silva, Cl&#225;udio T.; Gr&#246;ller, Eduard; Rushmeier, Holly %P 351 - 358 %I IEEE %@ 0-7803-9462-3
Schall, O., Belyaev, A., and Seidel, H.-P. 2005a. Sparse meshing of uncertain and noisy surface scattered data. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this paper, we develop a method for generating a high-quality approximation of a noisy set of points sampled from a smooth surface by a sparse triangle mesh. The main idea of the method consists of defining an appropriate set of approximation centers and use them as the vertices of a mesh approximating given scattered data. To choose the approximation centers, a clustering procedure is used. With every point of the input data we associate a local uncertainty measure which is used to estimate the importance of the point contribution to the reconstructed surface. Then a global uncertainty measure is constructed from local ones. The approximation centers are chosen as the points where the global uncertainty measure attains its local minima. It allows us to achieve a high-quality approximation of uncertain and noisy point data by a sparse mesh. An interesting feature of our approach is that the uncertainty measures take into account the normal directions estimated at the scattered points. In particular it results in accurate reconstruction of high-curvature regions.
Export
BibTeX
@techreport{SchallBelyaevSeidel2005,
  TITLE       = {Sparse meshing of uncertain and noisy surface scattered data},
  AUTHOR      = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-002},
  NUMBER      = {MPI-I-2005-4-002},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2005},
  DATE        = {2005},
  ABSTRACT    = {In this paper, we develop a method for generating a high-quality approximation of a noisy set of points sampled from a smooth surface by a sparse triangle mesh. The main idea of the method consists of defining an appropriate set of approximation centers and use them as the vertices of a mesh approximating given scattered data. To choose the approximation centers, a clustering procedure is used. With every point of the input data we associate a local uncertainty measure which is used to estimate the importance of the point contribution to the reconstructed surface. Then a global uncertainty measure is constructed from local ones. The approximation centers are chosen as the points where the global uncertainty measure attains its local minima. It allows us to achieve a high-quality approximation of uncertain and noisy point data by a sparse mesh. An interesting feature of our approach is that the uncertainty measures take into account the normal directions estimated at the scattered points. In particular it results in accurate reconstruction of high-curvature regions.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Sparse meshing of uncertain and noisy surface scattered data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-683C-1 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-002 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2005 %P 20 p. %X In this paper, we develop a method for generating a high-quality approximation of a noisy set of points sampled from a smooth surface by a sparse triangle mesh. The main idea of the method consists of defining an appropriate set of approximation centers and use them as the vertices of a mesh approximating given scattered data. To choose the approximation centers, a clustering procedure is used. With every point of the input data we associate a local uncertainty measure which is used to estimate the importance of the point contribution to the reconstructed surface. Then a global uncertainty measure is constructed from local ones. The approximation centers are chosen as the points where the global uncertainty measure attains its local minima. It allows us to achieve a high-quality approximation of uncertain and noisy point data by a sparse mesh. An interesting feature of our approach is that the uncertainty measures take into account the normal directions estimated at the scattered points. In particular it results in accurate reconstruction of high-curvature regions. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Schall, O., Belyaev, A., and Seidel, H.-P. 2005b. Robust Filtering of Noisy Scattered Point Data. Symposium on Point-based Graphics 05 (SPBG 2005), Eurographics Association.
Abstract
In this paper, we develop a method for robust filtering of a<br>noisy set of points sampled from a smooth surface. The main idea<br>of the method consists of using a kernel density estimation<br>technique for point clustering. Specifically, we use a<br>mean-shift based clustering procedure. With every point of the<br>input data we associate a local likelihood measure capturing the<br>probability that a 3D point is located on the sampled surface.<br>The likelihood measure takes into account the normal directions<br>estimated at the scattered points. Our filtering procedure<br>suppresses noise of different amplitudes and allows for an easy<br>detection of outliers which are then automatically removed by<br>simple thresholding. The remaining set of maximum likelihood<br>points delivers an accurate point-based approximation of the<br>surface. We also show that while some established meshing<br>techniques often fail to reconstruct the surface from original<br>noisy point scattered data, they work well in conjunction with<br>our filtering method.
Export
BibTeX
@inproceedings{Schall-et-al_SPBG05,
  TITLE     = {Robust Filtering of Noisy Scattered Point Data},
  AUTHOR    = {Schall, Oliver and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-20-7},
  DOI       = {10.2312/SPBG/SPBG05/071-077},
  LOCALID   = {Local-ID: C125675300671F7B-12066FE35B2D6EF2C125700C0065CE40-pbg05sbs},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {In this paper, we develop a method for robust filtering of a<br>noisy set of points sampled from a smooth surface. The main idea<br>of the method consists of using a kernel density estimation<br>technique for point clustering. Specifically, we use a<br>mean-shift based clustering procedure. With every point of the<br>input data we associate a local likelihood measure capturing the<br>probability that a 3D point is located on the sampled surface.<br>The likelihood measure takes into account the normal directions<br>estimated at the scattered points. Our filtering procedure<br>suppresses noise of different amplitudes and allows for an easy<br>detection of outliers which are then automatically removed by<br>simple thresholding. The remaining set of maximum likelihood<br>points delivers an accurate point-based approximation of the<br>surface. We also show that while some established meshing<br>techniques often fail to reconstruct the surface from original<br>noisy point scattered data, they work well in conjunction with<br>our filtering method.},
  BOOKTITLE = {Symposium on Point-based Graphics 05 (SPBG 2005)},
  EDITOR    = {Pauly, Mark and Zwicker, Matthias},
  PAGES     = {71--77},
  ADDRESS   = {Stony Brook, NY, USA},
}
Endnote
%0 Conference Proceedings %A Schall, Oliver %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Robust Filtering of Noisy Scattered Point Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2797-F %F EDOC: 278979 %F OTHER: Local-ID: C125675300671F7B-12066FE35B2D6EF2C125700C0065CE40-pbg05sbs %R 10.2312/SPBG/SPBG05/071-077 %D 2005 %B 2005 Eurographics Symposium on Point-based Graphics %Z date of event: 2005-06-21 - 2005-06-22 %C Stony Brook, NY, USA %X In this paper, we develop a method for robust filtering of a<br>noisy set of points sampled from a smooth surface. The main idea<br>of the method consists of using a kernel density estimation<br>technique for point clustering. Specifically, we use a<br>mean-shift based clustering procedure. With every point of the<br>input data we associate a local likelihood measure capturing the<br>probability that a 3D point is located on the sampled surface.<br>The likelihood measure takes into account the normal directions<br>estimated at the scattered points. Our filtering procedure<br>suppresses noise of different amplitudes and allows for an easy<br>detection of outliers which are then automatically removed by<br>simple thresholding. The remaining set of maximum likelihood<br>points delivers an accurate point-based approximation of the<br>surface. We also show that while some established meshing<br>techniques often fail to reconstruct the surface from original<br>noisy point scattered data, they work well in conjunction with<br>our filtering method. %B Symposium on Point-based Graphics 05 %E Pauly, Mark; Zwicker, Matthias %P 71 - 77 %I Eurographics Association %@ 3-905673-20-7
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2005a. Multi-scale and Adaptive CS-RBFs for Shape Reconstruction from Clouds of Points. In: Advances in Multiresolution for Geometric Modelling. Springer.
Export
BibTeX
@incollection{DBLP:books/sp/05/OhtakeBS05,
  TITLE     = {Multi-scale and Adaptive {CS}-{RBFs} for Shape Reconstruction from Clouds of Points},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1612-3786},
  ISBN      = {978-3-540-21462-5},
  DOI       = {10.1007/3-540-26808-1_8},
  PUBLISHER = {Springer},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {Advances in Multiresolution for Geometric Modelling},
  EDITOR    = {Dodgson, Neil A. and Floater, Michael S. and Sabin, Malcolm A.},
  PAGES     = {143--154},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-scale and Adaptive CS-RBFs for Shape Reconstruction from Clouds of Points : %G eng %U http://hdl.handle.net/21.11116/0000-000F-2A08-A %R 10.1007/3-540-26808-1_8 %D 2005 %B Advances in Multiresolution for Geometric Modelling %E Dodgson, Neil A.; Floater, Michael S.; Sabin, Malcolm A. %P 143 - 154 %I Springer %@ 978-3-540-21462-5 %S Mathematics and Visualization %@ false %U https://rdcu.be/dE1xI
Ohtake, Y., Belyaev, A., Alexa, M., Turk, G., and Seidel, H.-P. 2005b. Multi-level Partition of Unity Implicits. SIGGRAPH ’05: ACM SIGGRAPH 2005 Courses, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/OhtakeBATS05,
  TITLE     = {Multi-level Partition of Unity Implicits},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Alexa, Marc and Turk, Greg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-7833-8},
  DOI       = {10.1145/1198555.1198649},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Courses},
  EDITOR    = {Fujii, John},
  PAGES     = {173--180},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Ohtake, Yutaka %A Belyaev, Alexander %A Alexa, Marc %A Turk, Greg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-level Partition of Unity Implicits : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3CCD-8 %R 10.1145/1198555.1198649 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Courses %E Fujii, John %P 173 - 180 %I ACM %@ 978-1-4503-7833-8
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2005c. 3D Scattered Data Interpolation and Approximation with Multilevel Compactly Supported RBFs. Graphical Models (Proc. SMI 2003) 67, 3.
Export
BibTeX
@article{Ohtake-gmod05,
  TITLE     = {{3D} Scattered Data Interpolation and Approximation with Multilevel Compactly Supported {RBFs}},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1524-0703},
  DOI       = {10.1016/j.gmod.2004.06.003},
  PUBLISHER = {Academic Press},
  ADDRESS   = {San Diego, Calif.},
  YEAR      = {2005},
  DATE      = {2005},
  JOURNAL   = {Graphical Models (Proc. SMI)},
  VOLUME    = {67},
  NUMBER    = {3},
  PAGES     = {150--165},
  BOOKTITLE = {SMI 2003 special issue},
  EDITOR    = {Pasko, Alexander and Spagnuolo, Michela},
}
Endnote
%0 Journal Article %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Scattered Data Interpolation and Approximation with Multilevel Compactly Supported RBFs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E78B-1 %R 10.1016/j.gmod.2004.06.003 %7 2004-09-29 %D 2005 %J Graphical Models %V 67 %N 3 %& 150 %P 150 - 165 %I Academic Press %C San Diego, Calif. %@ false %B SMI 2003 special issue %O SMI 2003
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2005d. An Integrating Approach to Meshing Scattered Point Data. Proceedings SPM 2005, ACM.
Export
BibTeX
@inproceedings{Ohtake-spm05,
  TITLE     = {An Integrating Approach to Meshing Scattered Point Data},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1145/1060244.1060252},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {Proceedings SPM 2005},
  EDITOR    = {Spencer, Stephen N.},
  PAGES     = {61--69},
  ADDRESS   = {Cambridge, MA, USA},
}
Endnote
%0 Conference Proceedings %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Integrating Approach to Meshing Scattered Point Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E737-E %R 10.1145/1060244.1060252 %D 2005 %B ACM Symposium on Solid and Physical Modeling 2005 %Z date of event: 2005-06-13 - 2005-06-15 %C Cambridge, MA, USA %B Proceedings SPM 2005 %E Spencer, Stephen N. %P 61 - 69 %I ACM
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2005e. Multi-scale and Adaptive CS-RBFs for Shape Reconstruction from Cloud of Points. In: N.A. Dodgson, M.S. Floater and M.A. Sabin, eds., Advances in Multiresolution for Geometric Modelling. Springer, Berlin, Germany.
Export
BibTeX
@incollection{mingle04obs,
  TITLE         = {Multi-scale and Adaptive {CS}-{RBFs} for Shape Reconstruction from Cloud of Points},
  AUTHOR        = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  EDITOR        = {Dodgson, Neil A. and Floater, Michael S. and Sabin, Malcolm A.},
  LANGUAGE      = {eng},
  ISBN          = {3-540-21462-3},
  LOCALID       = {Local-ID: C125675300671F7B-9404156E86DB3A98C1256FAF003EDA44-mingle04obs},
  PUBLISHER     = {Springer},
  ADDRESS       = {Berlin, Germany},
  YEAR          = {2005},
  DATE          = {2005},
  BOOKTITLE     = {Advances in Multiresolution for Geometric Modelling},
  PAGES         = {143--154},
  internal-note = {review: removed redundant DEBUG editor dump (editors already in EDITOR); this entry appears to duplicate DBLP:books/sp/05/OhtakeBS05 under a title variant -- consider merging},
}
Endnote
%0 Book Section %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %E Dodgson, Neil A. %E Floater, Michael S. %E Sabin, Malcolm A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-scale and Adaptive CS-RBFs for Shape Reconstruction from Cloud of Points : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2721-A %F EDOC: 279022 %F OTHER: Local-ID: C125675300671F7B-9404156E86DB3A98C1256FAF003EDA44-mingle04obs %I Springer %C Berlin, Germany %D 2005 %B Advances in Multiresolution for Geometric Modelling %E Dodgson, Neil A.; Floater, Michael S.; Sabin, Malcolm A. %P 143 - 154 %I Springer %C Berlin, Germany %@ 3-540-21462-3
Nürnberger, G., Rössl, C., Zeilfelder, F., and Seidel, H.-P. 2005. Quasi-Interpolation by Quadratic Piecewise Polynomials in Three Variables. Computer Aided Geometric Design 22, 3.
Abstract
A quasi-interpolation method for quadratic piecewise polynomials in three<br>variables is described which can be used for the efficient reconstruction and<br>visualization of gridded volume data. We analyze the smoothness<br>properties of the trivariate splines. We prove that the splines yield nearly<br>optimal approximation order while simultaneously its piecewise derivatives<br>provide optimal approximation of the derivatives of smooth functions.<br>The constants of the corresponding error bounds are given explicitly.<br>Numerical tests confirm the results and the efficiency<br>of the method.
Export
BibTeX
@article{Nurnberger-et-al_CAGD05,
  TITLE     = {Quasi-Interpolation by Quadratic Piecewise Polynomials in Three Variables},
  AUTHOR    = {N{\"u}rnberger, G{\"u}nther and R{\"o}ssl, Christian and Zeilfelder, Frank and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-8396},
  DOI       = {10.1016/j.cagd.2004.11.002},
  LOCALID   = {Local-ID: C125675300671F7B-817E7400112458E6C1256F1D002EAAC1-nrsz:qiqpp:2004},
  PUBLISHER = {North-Holland},
  ADDRESS   = {Amsterdam},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {A quasi-interpolation method for quadratic piecewise polynomials in three<br>variables is described which can be used for the efficient reconstruction and<br>visualization of gridded volume data. We analyze the smoothness<br>properties of the trivariate splines. We prove that the splines yield nearly<br>optimal approximation order while simultaneously its piecewise derivatives<br>provide optimal approximation of the derivatives of smooth functions.<br>The constants of the corresponding error bounds are given explicitly.<br>Numerical tests confirm the results and the efficiency<br>of the method.},
  JOURNAL   = {Computer Aided Geometric Design},
  VOLUME    = {22},
  NUMBER    = {3},
  PAGES     = {221--249},
}
Endnote
%0 Journal Article %A N&#252;rnberger, G&#252;nther %A R&#246;ssl, Christian %A Zeilfelder, Frank %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Quasi-Interpolation by Quadratic Piecewise Polynomials in Three Variables : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2778-6 %F EDOC: 278995 %F OTHER: Local-ID: C125675300671F7B-817E7400112458E6C1256F1D002EAAC1-nrsz:qiqpp:2004 %R 10.1016/j.cagd.2004.11.002 %D 2005 %* Review method: peer-reviewed %X A quasi-interpolation method for quadratic piecewise polynomials in three<br>variables is described which can be used for the efficient reconstruction and<br>visualization of gridded volume data. We analyze the smoothness<br>properties of the trivariate splines. We prove that the splines yield nearly<br>optimal approximation order while simultaneously its piecewise derivatives<br>provide optimal approximation of the derivatives of smooth functions.<br>The constants of the corresponding error bounds are given explicitly.<br>Numerical tests confirm the results and the efficiency<br>of the method. %J Computer Aided Geometric Design %V 22 %N 3 %& 221 %P 221 - 249 %I North-Holland %C Amsterdam %@ false
Mertens, T., Kautz, J., Bekaert, P., Van Reeth, F., and Seidel, H.-P. 2005. Efficient Rendering of Local Subsurface Scattering. Computer Graphics Forum 24, 1.
Export
BibTeX
@article{DBLP:journals/cgf/MertensKBRS05,
  TITLE     = {Efficient Rendering of Local Subsurface Scattering},
  AUTHOR    = {Mertens, Tom and Kautz, Jan and Bekaert, Philippe and Van Reeth, Frank and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2005.00827.x},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2005},
  DATE      = {2005},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {24},
  NUMBER    = {1},
  PAGES     = {41--49},
}
Endnote
%0 Journal Article %A Mertens, Tom %A Kautz, Jan %A Bekaert, Philippe %A Reeth, Frank Van %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Rendering of Local Subsurface Scattering : %G eng %U http://hdl.handle.net/21.11116/0000-000F-43C4-8 %R 10.1111/J.1467-8659.2005.00827.X %D 2005 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 24 %N 1 %& 41 %P 41 - 49 %I Blackwell-Wiley %C Oxford %@ false
Mantiuk, R., Daly, S., Myszkowski, K., and Seidel, H.-P. 2005a. Predicting Visible Differences in High Dynamic Range Images - Model and its Calibration. Human Vision and Electronic Imaging X, IS&T/SPIE’s 17th Annual Symposium on Electronic Imaging (2005), SPIE.
Abstract
New imaging and rendering systems commonly use physically accurate<br> lighting information in the form of high-dynamic range (HDR) images<br> and video. HDR images contain actual colorimetric or physical<br> values, which can span 14 orders of magnitude, instead of 8-bit<br> renderings, found in standard images. The additional precision and<br> quality retained in HDR visual data is necessary to display images<br> on advanced HDR display devices, capable of showing contrast of<br> 50,000:1, as compared to the contrast of 700:1 for LCD displays.<br> With the development of high-dynamic range visual techniques comes a<br> need for an automatic visual quality assessment of the resulting<br> images.<br><br> In this paper we propose several modifications to the Visual<br> Difference Predicator (VDP). The modifications improve the<br> prediction of perceivable differences in the full visible range of<br> luminance and under the adaptation conditions corresponding to real<br> scene observation. The proposed metric takes into account the<br> aspects of high contrast vision, like scattering of the light in the<br> optics (OTF), nonlinear response to light for the full range of<br> luminance, and local adaptation. To calibrate our HDR~VDP we perform<br> experiments using an advanced HDR display, capable of displaying the<br> range of luminance that is close to that found in real scenes.
Export
BibTeX
@inproceedings{Mantiuk-et-al_SPIE05,
  TITLE     = {Predicting Visible Differences in High Dynamic Range Images -- Model and its Calibration},
  AUTHOR    = {Mantiuk, Rafal and Daly, Scott and Myszkowski, Karol and Seidel, Hans-Peter},
  EDITOR    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.},
  LANGUAGE  = {eng},
  ISSN      = {0277-786X},
  ISBN      = {978-0-8194-5639-7},
  DOI       = {10.1117/12.586757},
  LOCALID   = {Local-ID: C125675300671F7B-7A33923425AEBF68C1256F800037FB11-Mantiuk2005},
  PUBLISHER = {SPIE},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {New imaging and rendering systems commonly use physically accurate<br> lighting information in the form of high-dynamic range (HDR) images<br> and video. HDR images contain actual colorimetric or physical<br> values, which can span 14 orders of magnitude, instead of 8-bit<br> renderings, found in standard images. The additional precision and<br> quality retained in HDR visual data is necessary to display images<br> on advanced HDR display devices, capable of showing contrast of<br> 50,000:1, as compared to the contrast of 700:1 for LCD displays.<br> With the development of high-dynamic range visual techniques comes a<br> need for an automatic visual quality assessment of the resulting<br> images.<br><br> In this paper we propose several modifications to the Visual<br> Difference Predicator (VDP). The modifications improve the<br> prediction of perceivable differences in the full visible range of<br> luminance and under the adaptation conditions corresponding to real<br> scene observation. The proposed metric takes into account the<br> aspects of high contrast vision, like scattering of the light in the<br> optics (OTF), nonlinear response to light for the full range of<br> luminance, and local adaptation. To calibrate our HDR~VDP we perform<br> experiments using an advanced HDR display, capable of displaying the<br> range of luminance that is close to that found in real scenes.},
  BOOKTITLE = {Human Vision and Electronic Imaging X, IS\&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005)},
  PAGES     = {204--214},
  SERIES    = {SPIE Proceedings Series},
  VOLUME    = {5666},
  ADDRESS   = {San Jose, California USA},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Daly, Scott %A Myszkowski, Karol %A Seidel, Hans-Peter %E Daly, Scott J. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Predicting Visible Differences in High Dynamic Range Images - Model and its Calibration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2773-0 %F EDOC: 278999 %F OTHER: Local-ID: C125675300671F7B-7A33923425AEBF68C1256F800037FB11-Mantiuk2005 %R 10.1117/12.586757 %D 2005 %B IS&T/SPIE's 17th Annual Symposium on Electronic Imaging %Z date of event: 2005-01-17 - %C San Jose, California USA %X New imaging and rendering systems commonly use physically accurate<br> lighting information in the form of high-dynamic range (HDR) images<br> and video. HDR images contain actual colorimetric or physical<br> values, which can span 14 orders of magnitude, instead of 8-bit<br> renderings, found in standard images. The additional precision and<br> quality retained in HDR visual data is necessary to display images<br> on advanced HDR display devices, capable of showing contrast of<br> 50,000:1, as compared to the contrast of 700:1 for LCD displays.<br> With the development of high-dynamic range visual techniques comes a<br> need for an automatic visual quality assessment of the resulting<br> images.<br><br> In this paper we propose several modifications to the Visual<br> Difference Predicator (VDP). The modifications improve the<br> prediction of perceivable differences in the full visible range of<br> luminance and under the adaptation conditions corresponding to real<br> scene observation. The proposed metric takes into account the<br> aspects of high contrast vision, like scattering of the light in the<br> optics (OTF), nonlinear response to light for the full range of<br> luminance, and local adaptation. 
To calibrate our HDR~VDP we perform<br> experiments using an advanced HDR display, capable of displaying the<br> range of luminance that is close to that found in real scenes. %B Human Vision and Electronic Imaging X, IS&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005) %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 204 - 214 %I SPIE %@ 978-0-8194-5639-7 %B SPIE Proceedings Series %N 5666 %@ false
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2005b. A Perceptual Framework for Contrast Processing of High Dynamic Range Images. APGV ’05: Proceedings of the 2nd Symposium on Applied Perception in Graphics and Visualization, ACM.
Abstract
In this work we propose a framework for image processing in a visual<br> response space, in which contrast values directly correlate with<br> their visibility in an image. Our framework involves a<br> transformation of an image from luminance space to a pyramid of<br> low-pass contrast images and then to the visual response space.<br> After modifying response values, the transformation can be reversed<br> to produce the resulting image. To predict the visibility of<br> suprathreshold contrast, we derive a transducer function for the<br> full range of contrast levels that can be found in High Dynamic<br> Range images. We show that a complex contrast compression operation,<br> which preserves textures of small contrast, is reduced to a linear<br> scaling in the proposed visual response space.
Export
BibTeX
@inproceedings{Mantiuk-et-al_APGV05,
  TITLE     = {A Perceptual Framework for Contrast Processing of High Dynamic Range Images},
  AUTHOR    = {Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-139-9},
  DOI       = {10.1145/1080402.1080418},
  LOCALID   = {Local-ID: C125675300671F7B-C07FBDA152C52871C12570700034B914-mantiuk2004::contrast},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {In this work we propose a framework for image processing in a visual response space, in which contrast values directly correlate with their visibility in an image. Our framework involves a transformation of an image from luminance space to a pyramid of low-pass contrast images and then to the visual response space. After modifying response values, the transformation can be reversed to produce the resulting image. To predict the visibility of suprathreshold contrast, we derive a transducer function for the full range of contrast levels that can be found in High Dynamic Range images. We show that a complex contrast compression operation, which preserves textures of small contrast, is reduced to a linear scaling in the proposed visual response space.},
  BOOKTITLE = {APGV '05: Proceedings of the 2nd Symposium on Applied Perception in Graphics and Visualization},
  EDITOR    = {Malik, Jitendra and Koenderink, Jan J.},
  PAGES     = {87--94},
  ADDRESS   = {Coruna, Spain},
}
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Framework for Contrast Processing of High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-25BB-F %F EDOC: 278998 %F OTHER: Local-ID: C125675300671F7B-C07FBDA152C52871C12570700034B914-mantiuk2004::contrast %R 10.1145/1080402.1080418 %D 2005 %B 2nd Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2005-08-26 - 2005-08-28 %C Coruna, Spain %X In this work we propose a framework for image processing in a visual<br> response space, in which contrast values directly correlate with<br> their visibility in an image. Our framework involves a<br> transformation of an image from luminance space to a pyramid of<br> low-pass contrast images and then to the visual response space.<br> After modifying response values, the transformation can be reversed<br> to produce the resulting image. To predict the visibility of<br> suprathreshold contrast, we derive a transducer function for the<br> full range of contrast levels that can be found in High Dynamic<br> Range images. We show that a complex contrast compression operation,<br> which preserves textures of small contrast, is reduced to a linear<br> scaling in the proposed visual response space. %B APGV '05: Proceedings of the 2nd Symposium on Applied Perception in Graphics and Visualization %E Malik, Jitendra; Koenderink, Jan J. %P 87 - 94 %I ACM %@ 978-1-59593-139-9
Lipman, Y., Sorkine, O., Alexa, M., et al. 2005. Laplacian Framework for Interactive Mesh Editing. International Journal of Shape Modeling 11, 1.
Abstract
Recent works in geometric modeling show the advantage of local differential coordinates in various surface processing applications. In this paper we review recent methods that advocate surface representation via differential coordinates as a basis to interactive mesh editing. One of the main challenges in editing a mesh is to retain the visual appearance of the surface after applying various modifications. The differential coordinates capture the local geometric details and therefore are a natural surface representation for editing applications. The coordinates are obtained by applying a linear operator to the mesh geometry. Given suitable deformation constraints, the mesh geometry is reconstructed from the differential representation by solving a sparse linear system. The differential coordinates are not rotation-invariant and thus their rotation must be explicitly handled in order to retain the correct orientation of the surface details. We review two methods for computing the local rotations: the first estimates them heuristically using a deformation which only preserves the underlying smooth surface, and the second estimates the rotations implicitly through a variational representation of the problem.

We show that the linear reconstruction system can be solved fast enough to guarantee interactive response time thanks to a precomputed factorization of the coefficient matrix. We demonstrate that this approach enables to edit complex meshes while retaining the shape of the details in their natural orientation.
Export
BibTeX
@article{Lipman-et-al_IJSM05,
  TITLE     = {Laplacian Framework for Interactive Mesh Editing},
  AUTHOR    = {Lipman, Yaron and Sorkine, Olga and Alexa, Marc and Cohen-Or, Daniel and Levin, David and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0218-6543},
  DOI       = {10.1142/S0218654305000724},
  LOCALID   = {Local-ID: C125675300671F7B-88FD0EB43D6CBFABC125704D0034A2C1-lsaclrs:2005},
  PUBLISHER = {World Scientific},
  ADDRESS   = {Singapore},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {Recent works in geometric modeling show the advantage of local differential coordinates in various surface processing applications. In this paper we review recent methods that advocate surface representation via differential coordinates as a basis to interactive mesh editing. One of the main challenges in editing a mesh is to retain the visual appearance of the surface after applying various modifications. The differential coordinates capture the local geometric details and therefore are a natural surface representation for editing applications. The coordinates are obtained by applying a linear operator to the mesh geometry. Given suitable deformation constraints, the mesh geometry is reconstructed from the differential representation by solving a sparse linear system. The differential coordinates are not rotation-invariant and thus their rotation must be explicitly handled in order to retain the correct orientation of the surface details. We review two methods for computing the local rotations: the first estimates them heuristically using a deformation which only preserves the underlying smooth surface, and the second estimates the rotations implicitly through a variational representation of the problem. We show that the linear reconstruction system can be solved fast enough to guarantee interactive response time thanks to a precomputed factorization of the coefficient matrix. We demonstrate that this approach enables to edit complex meshes while retaining the shape of the details in their natural orientation.},
  JOURNAL   = {International Journal of Shape Modeling},
  VOLUME    = {11},
  NUMBER    = {1},
  PAGES     = {43--61},
}
Endnote
%0 Journal Article %A Lipman, Yaron %A Sorkine, Olga %A Alexa, Marc %A Cohen-Or, Daniel %A Levin, David %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Laplacian Framework for Interactive Mesh Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26EB-C %F EDOC: 279000 %F OTHER: Local-ID: C125675300671F7B-88FD0EB43D6CBFABC125704D0034A2C1-lsaclrs:2005 %R 10.1142/S0218654305000724 %D 2005 %* Review method: peer-reviewed %X Recent works in geometric modeling show the advantage of local differential <br>coordinates in various surface processing applications. In this paper we review <br>recent methods that advocate surface representation via differential <br>coordinates as a basis to interactive mesh editing. One of the main challenges <br>in editing a mesh is to retain the visual appearance of the surface after <br>applying various modifications. The differential coordinates capture the local <br>geometric details and therefore are a natural surface representation for <br>editing applications. The coordinates are obtained by applying a linear <br>operator to the mesh geometry. Given suitable deformation constraints, the mesh <br>geometry is reconstructed from the differential representation by solving a <br>sparse linear system. The differential coordinates are not rotation-invariant <br>and thus their rotation must be explicitly handled in order to retain the <br>correct orientation of the surface details. 
We review two methods for computing <br>the local rotations: the first estimates them heuristically using a deformation <br>which only preserves the underlying smooth surface, and the second estimates <br>the rotations implicitly through a variational representation of the problem.<br><br>We show that the linear reconstruction system can be solved fast enough to <br>guarantee interactive response time thanks to a precomputed factorization of <br>the coefficient matrix. We demonstrate that this approach enables to edit <br>complex meshes while retaining the shape of the details in their natural <br>orientation. %J International Journal of Shape Modeling %V 11 %N 1 %& 43 %P 43 - 61 %I World Scientific %C Singapore %@ false
Lee, Y., Yoon, M., Lee, S., Ivrissimtzis, I., and Seidel, H.-P. 2005a. Ensembles for Surface Reconstruction. PG 2005, 13th Pacific Conference on Computer Graphics and Applications, University of Macao.
Export
BibTeX
@inproceedings{lylis2005,
  TITLE     = {Ensembles for Surface Reconstruction},
  AUTHOR    = {Lee, Yunjin and Yoon, Mincheol and Lee, Seungyong and Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-89521737107C8D5FC125710E003CB3ED-lylis2005},
  PUBLISHER = {University of Macao},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {PG 2005, 13th Pacific Conference on Computer Graphics and Applications},
  EDITOR    = {Gotsman, Craig and Manocha, Dinesh and Wu, Enhua},
  PAGES     = {1--3},
  ADDRESS   = {Macao, China},
}
Endnote
%0 Conference Proceedings %A Lee, Yunjin %A Yoon, Mincheol %A Lee, Seungyong %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Ensembles for Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-21BF-D %F EDOC: 279001 %F OTHER: Local-ID: C125675300671F7B-89521737107C8D5FC125710E003CB3ED-lylis2005 %D 2005 %B 13th Pacific Conference on Computer Graphics and Applications %Z date of event: 2005-10-12 - 2005-10-14 %C Macao, China %B PG 2005 %E Gotsman, Craig; Manosha, Dinesh; Wu, Enhua %P 1 - 3 %I University of Macao
Lee, Y., Lee, S., Shamir, A., Cohen-Or, D., and Seidel, H.-P. 2005b. Mesh Scissoring with Minima Rule and Part Salience. Computer Aided Geometric Design 22, 5.
Export
BibTeX
@article{Lee-et-al_CAGD05,
  TITLE     = {Mesh Scissoring with Minima Rule and Part Salience},
  AUTHOR    = {Lee, Yunjin and Lee, Seungyong and Shamir, Ariel and Cohen-Or, Daniel and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-8396},
  DOI       = {10.1016/j.cagd.2005.04.002},
  LOCALID   = {Local-ID: C125675300671F7B-8ED6810FB598BC3CC1256FB6004FF3FF-LeeMeshScissoring05},
  PUBLISHER = {North-Holland},
  ADDRESS   = {Amsterdam},
  YEAR      = {2005},
  DATE      = {2005},
  JOURNAL   = {Computer Aided Geometric Design},
  VOLUME    = {22},
  NUMBER    = {5},
  PAGES     = {444--465},
}
Endnote
%0 Journal Article %A Lee, Yunjin %A Lee, Seungyong %A Shamir, Ariel %A Cohen-Or, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Mesh Scissoring with Minima Rule and Part Salience : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2707-6 %F EDOC: 279002 %F OTHER: Local-ID: C125675300671F7B-8ED6810FB598BC3CC1256FB6004FF3FF-LeeMeshScissoring05 %R 10.1016/j.cagd.2005.04.002 %D 2005 %J Computer Aided Geometric Design %V 22 %N 5 %& 444 %P 444 - 465 %I North-Holland %C Amsterdam %@ false
Langer, T., Belyaev, A., and Seidel, H.-P. 2005a. Exact and Approximate Quadratures for Curvature Tensor Estimation. Poster Proceedings of the Third Eurographics Symposium on Geometry Processing (SGP 2005).
Export
BibTeX
@inproceedings{LangerSGP05,
  TITLE     = {Exact and Approximate Quadratures for Curvature Tensor Estimation},
  AUTHOR    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-75E239D1DD5561E6C12570430051CEFF-LangerSGP05},
  YEAR      = {2005},
  BOOKTITLE = {Poster Proceedings of the Third Eurographics Symposium on Geometry Processing (SGP 2005)},
  EDITOR    = {Desbrun, Mathieu and Pottmann, Helmut},
  ADDRESS   = {Vienna, Austria},
}
Endnote
%0 Generic %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exact and Approximate Quadratures for Curvature Tensor Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-21AF-2 %F EDOC: 279005 %F OTHER: Local-ID: C125675300671F7B-75E239D1DD5561E6C12570430051CEFF-LangerSGP05 %D 2005 %Z name of event: Third Eurographics Symposium on Geometry Processing %Z date of event: 2005-07-04 - 2005-07-06 %Z place of event: Vienna, Austria %B Poster Proceedings of the Third Eurographics Symposium on Geometry Processing %E Desbrun, Mathieu; Pottmann, Helmut
Langer, T., Belyaev, A., and Seidel, H.-P. 2005b. Analysis and design of discrete normals and curvatures. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Accurate estimations of geometric properties of a surface (a curve) from its discrete approximation are important for many computer graphics and computer vision applications. To assess and improve the quality of such an approximation we assume that the smooth surface (curve) is known in general form. Then we can represent the surface (curve) by a Taylor series expansion and compare its geometric properties with the corresponding discrete approximations. In turn we can either prove convergence of these approximations towards the true properties as the edge lengths tend to zero, or we can get hints how to eliminate the error. In this report we propose and study discrete schemes for estimating the curvature and torsion of a smooth 3D curve approximated by a polyline. Thereby we make some interesting findings about connections between (smooth) classical curves and certain estimation schemes for polylines. Furthermore, we consider several popular schemes for estimating the surface normal of a dense triangle mesh interpolating a smooth surface, and analyze their asymptotic properties. Special attention is paid to the mean curvature vector, that approximates both, normal direction and mean curvature. We evaluate a common discrete approximation and show how asymptotic analysis can be used to improve it. It turns out that the integral formulation of the mean curvature \begin{equation*} H = \frac{1}{2 \pi} \int_0^{2 \pi} \kappa(\phi) d\phi, \end{equation*} can be computed by an exact quadrature formula. The same is true for the integral formulations of Gaussian curvature and the Taubin tensor. The exact quadratures are then used to obtain reliable estimates of the curvature tensor of a smooth surface approximated by a dense triangle mesh. The proposed method is fast and often demonstrates a better performance than conventional curvature tensor estimation approaches. 
We also show that the curvature tensor approximated by our approach converges towards the true curvature tensor as the edge lengths tend to zero.
Export
BibTeX
@techreport{LangerBelyaevSeidel2005,
  TITLE       = {Analysis and design of discrete normals and curvatures},
  AUTHOR      = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-003},
  NUMBER      = {MPI-I-2005-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2005},
  DATE        = {2005},
  ABSTRACT    = {Accurate estimations of geometric properties of a surface (a curve) from its discrete approximation are important for many computer graphics and computer vision applications. To assess and improve the quality of such an approximation we assume that the smooth surface (curve) is known in general form. Then we can represent the surface (curve) by a Taylor series expansion and compare its geometric properties with the corresponding discrete approximations. In turn we can either prove convergence of these approximations towards the true properties as the edge lengths tend to zero, or we can get hints how to eliminate the error. In this report we propose and study discrete schemes for estimating the curvature and torsion of a smooth 3D curve approximated by a polyline. Thereby we make some interesting findings about connections between (smooth) classical curves and certain estimation schemes for polylines. Furthermore, we consider several popular schemes for estimating the surface normal of a dense triangle mesh interpolating a smooth surface, and analyze their asymptotic properties. Special attention is paid to the mean curvature vector, that approximates both, normal direction and mean curvature. We evaluate a common discrete approximation and show how asymptotic analysis can be used to improve it. It turns out that the integral formulation of the mean curvature \begin{equation*} H = \frac{1}{2 \pi} \int_0^{2 \pi} \kappa(\phi) d\phi, \end{equation*} can be computed by an exact quadrature formula. The same is true for the integral formulations of Gaussian curvature and the Taubin tensor. The exact quadratures are then used to obtain reliable estimates of the curvature tensor of a smooth surface approximated by a dense triangle mesh. The proposed method is fast and often demonstrates a better performance than conventional curvature tensor estimation approaches. We also show that the curvature tensor approximated by our approach converges towards the true curvature tensor as the edge lengths tend to zero.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Analysis and design of discrete normals and curvatures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6837-B %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2005 %P 42 p. %X Accurate estimations of geometric properties of a surface (a curve) from its discrete approximation are important for many computer graphics and computer vision applications. To assess and improve the quality of such an approximation we assume that the smooth surface (curve) is known in general form. Then we can represent the surface (curve) by a Taylor series expansion and compare its geometric properties with the corresponding discrete approximations. In turn we can either prove convergence of these approximations towards the true properties as the edge lengths tend to zero, or we can get hints how to eliminate the error. In this report we propose and study discrete schemes for estimating the curvature and torsion of a smooth 3D curve approximated by a polyline. Thereby we make some interesting findings about connections between (smooth) classical curves and certain estimation schemes for polylines. Furthermore, we consider several popular schemes for estimating the surface normal of a dense triangle mesh interpolating a smooth surface, and analyze their asymptotic properties. Special attention is paid to the mean curvature vector, that approximates both, normal direction and mean curvature. We evaluate a common discrete approximation and show how asymptotic analysis can be used to improve it. 
It turns out that the integral formulation of the mean curvature \begin{equation*} H = \frac{1}{2 \pi} \int_0^{2 \pi} \kappa(\phi) d\phi, \end{equation*} can be computed by an exact quadrature formula. The same is true for the integral formulations of Gaussian curvature and the Taubin tensor. The exact quadratures are then used to obtain reliable estimates of the curvature tensor of a smooth surface approximated by a dense triangle mesh. The proposed method is fast and often demonstrates a better performance than conventional curvature tensor estimation approaches. We also show that the curvature tensor approximated by our approach converges towards the true curvature tensor as the edge lengths tend to zero. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Langer, T., Belyaev, A., and Seidel, H.-P. 2005c. Asymptotic Analysis of Discrete Normals and Curvatures of Polylines. SCCG ’05: Proceedings of the 21st spring conference on Computer graphics, ACM.
Abstract
Accurate estimations of geometric properties of a smooth curve from its discrete approximation are important for many computer graphics and computer vision applications. To assess and improve the quality of such an approximation, we assume that the curve is known in general form. Then we can represent the curve by a Taylor series expansion and compare its geometric properties with the corresponding discrete approximations. In turn we can either prove convergence of these approximations towards the true properties as the edge lengths tend to zero, or we can get hints on how to eliminate the error. In this paper, we propose and study discrete schemes for estimating tangent and normal vectors as well as for estimating curvature and torsion of a smooth 3D curve approximated by a polyline. Thereby we make some interesting findings about connections between (smooth) classical curves and certain estimation schemes for polylines.
Export
BibTeX
@inproceedings{Langer-et-al_SCCG05,
  TITLE     = {Asymptotic Analysis of Discrete Normals and Curvatures of Polylines},
  AUTHOR    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-204-4},
  DOI       = {10.1145/1090122.1090160},
  LOCALID   = {Local-ID: C125675300671F7B-519A2B2A83342C3AC12571920048876E-LangerSCCG05},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {Accurate estimations of geometric properties of a smooth curve from its discrete approximation are important for many computer graphics and computer vision applications. To assess and improve the quality of such an approximation, we assume that the curve is known in general form. Then we can represent the curve by a Taylor series expansion and compare its geometric properties with the corresponding discrete approximations. In turn we can either prove convergence of these approximations towards the true properties as the edge lengths tend to zero, or we can get hints on how to eliminate the error. In this paper, we propose and study discrete schemes for estimating tangent and normal vectors as well as for estimating curvature and torsion of a smooth 3D curve approximated by a polyline. Thereby we make some interesting findings about connections between (smooth) classical curves and certain estimation schemes for polylines.},
  BOOKTITLE = {SCCG '05: Proceedings of the 21st spring conference on Computer graphics},
  PAGES     = {229--232},
  ADDRESS   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Asymptotic Analysis of Discrete Normals and Curvatures of Polylines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-25CC-9 %F EDOC: 279006 %F OTHER: Local-ID: C125675300671F7B-519A2B2A83342C3AC12571920048876E-LangerSCCG05 %R 10.1145/1090122.1090160 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %X Accurate estimations of geometric properties of a smooth curve from its <br>discrete approximation are important for many computer graphics and computer <br>vision applications. To assess and improve the quality of such an <br>approximation, we assume that the curve is known in general form. Then we can <br>represent the curve by a Taylor series expansion and compare its geometric <br>properties with the corresponding discrete approximations. In turn we can <br>either prove convergence of these approximations towards the true properties as <br>the edge lengths tend to zero, or we can get hints on how to eliminate the <br>error. In this paper, we propose and study discrete schemes for estimating <br>tangent and normal vectors as well as for estimating curvature and torsion of a <br>smooth 3D curve approximated by a polyline. Thereby we make some interesting <br>findings about connections between (smooth) classical curves and certain <br>estimation schemes for polylines. %B SCCG '05: Proceedings of the 21st spring conference on Computer graphics %P 229 - 232 %I ACM %@ 978-1-59593-204-4
Langer, T., Belyaev, A., and Seidel, H.-P. 2005d. Exact and Approximate Quadratures for Curvature Tensor Estimation. Vision, Modeling, and Visualization 2005 (VMV 2005), Akademische Verlagsgesellschaft Aka.
Abstract
Accurate estimations of geometric properties of a surface from its discrete approximation are important for many computer graphics and geometric modeling applications. In this paper, we derive exact quadrature formulae for mean curvature, Gaussian curvature, and the Taubin integral representation of the curvature tensor. The exact quadratures are then used to obtain reliable estimates of the curvature tensor of a smooth surface approximated by a dense triangle mesh. The proposed method is fast and easy to implement. It is highly competitive with conventional curvature tensor estimation approaches. Additionally, we show that the curvature tensor approximated as proposed by us converges towards the true curvature tensor as the edge lengths tend to zero.
Export
BibTeX
@inproceedings{LangerVMV05,
  TITLE     = {Exact and Approximate Quadratures for Curvature Tensor Estimation},
  AUTHOR    = {Langer, Torsten and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-068-8},
  LOCALID   = {Local-ID: C125675300671F7B-0DAAC76861ABE5FEC12570C1003CF16D-LangerVMV05},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {Accurate estimations of geometric properties of a surface from its discrete approximation are important for many computer graphics and geometric modeling applications. In this paper, we derive exact quadrature formulae for mean curvature, Gaussian curvature, and the Taubin integral representation of the curvature tensor. The exact quadratures are then used to obtain reliable estimates of the curvature tensor of a smooth surface approximated by a dense triangle mesh. The proposed method is fast and easy to implement. It is highly competitive with conventional curvature tensor estimation approaches. Additionally, we show that the curvature tensor approximated as proposed by us converges towards the true curvature tensor as the edge lengths tend to zero.},
  BOOKTITLE = {Vision, Modeling, and Visualization 2005 (VMV 2005)},
  EDITOR    = {Greiner, G{\"u}nther and Hornegger, Joachim and Niemann, Heinrich and Stamminger, Marc},
  PAGES     = {421--428},
  ADDRESS   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Langer, Torsten %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exact and Approximate Quadratures for Curvature Tensor Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2674-5 %F EDOC: 279004 %F OTHER: Local-ID: C125675300671F7B-0DAAC76861ABE5FEC12570C1003CF16D-LangerVMV05 %D 2005 %B Vision, Modeling, and Visualization 2005 %Z date of event: 2005-11-16 - 2005-11-18 %C Erlangen, Germany %X Accurate estimations of geometric properties of a surface from its discrete approximation are important for many computer graphics and geometric modeling applications. In this paper, we derive exact quadrature formulae for mean curvature, Gaussian curvature, and the Taubin integral representation of the curvature tensor. The exact quadratures are then used to obtain reliable estimates of the curvature tensor of a smooth surface approximated by a dense triangle mesh. The proposed method is fast and easy to implement. It is highly competitive with conventional curvature tensor estimation approaches. Additionally, we show that the curvature tensor approximated as proposed by us converges towards the true curvature tensor as the edge lengths tend to zero. %B Vision, Modeling, and Visualization 2005 %E Greiner, G&#252;nther; Hornegger, Joachim; Niemann, Heinrich; Stamminger, Marc %P 421 - 428 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-068-8
Krawczyk, G., Gösele, M., and Seidel, H.-P. 2005a. Photometric calibration of high dynamic range cameras. Max-Planck-Institut für Informatik, Saarbrücken.
Export
BibTeX
@techreport{KrawczykGoeseleSeidel2005,
  TITLE       = {Photometric calibration of high dynamic range cameras},
  AUTHOR      = {Krawczyk, Grzegorz and G{\"o}sele, Michael and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-005},
  NUMBER      = {MPI-I-2005-4-005},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2005},
  DATE        = {2005},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Krawczyk, Grzegorz %A G&#246;sele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Photometric calibration of high dynamic range cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6834-2 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-005 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2005 %P 21 p. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005b. Lightness Perception in Tone Reproduction for High Dynamic Range Images. Computer Graphics Forum, Blackwell.
Export
BibTeX
@inproceedings{Krawczyk-et-al_EUROGRAPHICS_05,
  TITLE     = {Lightness Perception in Tone Reproduction for High Dynamic Range Images},
  AUTHOR    = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2005.00888.x},
  LOCALID   = {Local-ID: C125675300671F7B-D7B5D281DAAB9EB0C1256FE90049E357-Krawczyk05EG},
  PUBLISHER = {Blackwell},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005},
  EDITOR    = {Alexa, Marc and Marks, Joe},
  PAGES     = {635--645},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {24},
  ISSUE     = {3},
  ADDRESS   = {Dublin, Ireland},
}
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lightness Perception in Tone Reproduction for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26F6-3 %F EDOC: 279009 %F OTHER: Local-ID: C125675300671F7B-D7B5D281DAAB9EB0C1256FE90049E357-Krawczyk05EG %R 10.1111/j.1467-8659.2005.00888.x %D 2005 %B The European Association for Computer Graphics 26th Annual Conference %Z date of event: 2005-08-29 - %C Dublin, Ireland %B The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005 %E Alexa, Marc; Marks, Joe %P 635 - 645 %I Blackwell %J Computer Graphics Forum %V 24 %N 3 %I Blackwell-Wiley %@ false
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005c. Perceptual Effects in Real-time Tone Mapping. SCCG ’05: Proceedings of the 21st spring conference on Computer graphics, ACM.
Abstract
Tremendous progress in the development and accessibility of high dynamic range <br>(HDR) technology that has happened just recently results in fast proliferation <br>of HDR synthetic image sequences and captured HDR video. When properly <br>processed, such HDR data can lead to very convincing and realistic results even <br>when presented on traditional low dynamic range (LDR) display devices. This <br>requires real-time local contrast compression (tone mapping) with simultaneous <br>modeling of important in HDR image perception effects such as visual acuity, <br>glare, day and night vision. We propose a unified model to include all those <br>effects into a common computational framework, which enables an efficient <br>implementation on currently available graphics hardware. We develop a post <br>processing module which can be added as the final stage of any real-time <br>rendering system, game engine, or digital video player, which enhances the <br>realism and believability of displayed image streams.
Export
BibTeX
@inproceedings{Krawczyk-et-al_SCCG05,
  TITLE     = {Perceptual Effects in Real-time Tone Mapping},
  AUTHOR    = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-204-4},
  DOI       = {10.1145/1090122.1090154},
  LOCALID   = {Local-ID: C125675300671F7B-A48310C4FDBE1EA6C1256FE9004D4776-Krawczyk2005sccg},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {Tremendous progress in the development and accessibility of high dynamic range (HDR) technology that has happened just recently results in fast proliferation of HDR synthetic image sequences and captured HDR video. When properly processed, such HDR data can lead to very convincing and realistic results even when presented on traditional low dynamic range (LDR) display devices. This requires real-time local contrast compression (tone mapping) with simultaneous modeling of important in HDR image perception effects such as visual acuity, glare, day and night vision. We propose a unified model to include all those effects into a common computational framework, which enables an efficient implementation on currently available graphics hardware. We develop a post processing module which can be added as the final stage of any real-time rendering system, game engine, or digital video player, which enhances the realism and believability of displayed image streams.},
  BOOKTITLE = {SCCG '05: Proceedings of the 21st spring conference on Computer graphics},
  PAGES     = {195--202},
  ADDRESS   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Effects in Real-time Tone Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2757-0 %F EDOC: 279038 %F OTHER: Local-ID: C125675300671F7B-A48310C4FDBE1EA6C1256FE9004D4776-Krawczyk2005sccg %R 10.1145/1090122.1090154 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %X Tremendous progress in the development and accessibility of high dynamic range <br>(HDR) technology that has happened just recently results in fast proliferation <br>of HDR synthetic image sequences and captured HDR video. When properly <br>processed, such HDR data can lead to very convincing and realistic results even <br>when presented on traditional low dynamic range (LDR) display devices. This <br>requires real-time local contrast compression (tone mapping) with simultaneous <br>modeling of important in HDR image perception effects such as visual acuity, <br>glare, day and night vision. We propose a unified model to include all those <br>effects into a common computational framework, which enables an efficient <br>implementation on currently available graphics hardware. We develop a post <br>processing module which can be added as the final stage of any real-time <br>rendering system, game engine, or digital video player, which enhances the <br>realism and believability of displayed image streams. %B SCCG '05: Proceedings of the 21st spring conference on Computer graphics %P 195 - 202 %I ACM %@ 978-1-59593-204-4
Ivrissimtzis, I., Jeong, W.-K., Lee, S., Lee, Y., and Seidel, H.-P. 2005a. Surface Reconstruction with Neural Meshes. 6th International Conference on Mathematical Methods for Curves and Surfaces, Nashboro Press.
Export
BibTeX
@inproceedings{ijlls05,
  TITLE     = {Surface Reconstruction with Neural Meshes},
  AUTHOR    = {Ivrissimtzis, Ioannis and Jeong, Won-Ki and Lee, Seungyong and Lee, Yunjin and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-9728482-4-X},
  LOCALID   = {Local-ID: C125675300671F7B-15E915F2BE6F7810C1256FC10048D66F-ijlls05},
  PUBLISHER = {Nashboro Press},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {6th International Conference on Mathematical Methods for Curves and Surfaces},
  PAGES     = {223--242},
}
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Jeong, Won-Ki %A Lee, Seungyong %A Lee, Yunjin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Surface Reconstruction with Neural Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-27D1-D %F EDOC: 279012 %F OTHER: Local-ID: C125675300671F7B-15E915F2BE6F7810C1256FC10048D66F-ijlls05 %I Nashboro Press %D 2005 %B Untitled Event %Z date of event: 2004-07-01 - %C Tromso, Norway %B 6th International Conference on Mathematical Methods for Curves and Surfaces %P 223 - 242 %I Nashboro Press %@ 0-9728482-4-X
Ivrissimtzis, I., Zayer, R., and Seidel, H.-P. 2005b. Polygonal decompositions of quadrilateral subdivision meshes. Computer Graphics & Geometry 7.
Abstract
We study a polygonal decomposition of the 1-ring neighborhood of a quadrilateral mesh. This decomposition corresponds to the eigenvectors of a matrix with circulant blocks, thus, it is suitable for the study of subdivision schemes. First, we calculate the extent of the local mesh area we have to consider in order to get a geometrically meaningful decomposition. Then we concentrate on the Catmull-Clark scheme and decompose the 1-ring neighborhood into 2n planar 2n-gons, which under subdivision scheme transform into 4n planar n-gons coming in pairs of coplanar polygons and quadruples of parallel polygons. We calculate the eigenvalues and eigenvectors of the transformations of these configurations showing their relation with the tangent plane and the curvature properties of the subdivision surface. Using direct computations on circulant-block matrices we show how the same eigenvalues can be analytically deduced from the subdivision matrix.
Export
BibTeX
@article{izs2005a,
  TITLE    = {Polygonal decompositions of quadrilateral subdivision meshes},
  AUTHOR   = {Ivrissimtzis, Ioannis and Zayer, Rhaleb and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN     = {1811-8992},
  LOCALID  = {Local-ID: C125675300671F7B-B462B3435182A3AEC1256FC1004E095F-izs2005a},
  YEAR     = {2005},
  DATE     = {2005},
  ABSTRACT = {We study a polygonal decomposition of the 1-ring neighborhood of a quadrilateral mesh. This decomposition corresponds to the eigenvectors of a matrix with circulant blocks, thus, it is suitable for the study of subdivision schemes. First, we calculate the extent of the local mesh area we have to consider in order to get a geometrically meaningful decomposition. Then we concentrate on the Catmull-Clark scheme and decompose the 1-ring neighborhood into 2n planar 2n-gons, which under subdivision scheme transform into 4n planar n-gons coming in pairs of coplanar polygons and quadruples of parallel polygons. We calculate the eigenvalues and eigenvectors of the transformations of these configurations showing their relation with the tangent plane and the curvature properties of the subdivision surface. Using direct computations on circulant-block matrices we show how the same eigenvalues can be analytically deduced from the subdivision matrix.},
  JOURNAL  = {Computer Graphics \& Geometry},
  VOLUME   = {7},
  PAGES    = {16--30},
}
Endnote
%0 Journal Article %A Ivrissimtzis, Ioannis %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Polygonal decompositions of quadrilateral subdivision meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-275F-F %F EDOC: 278948 %F OTHER: Local-ID: C125675300671F7B-B462B3435182A3AEC1256FC1004E095F-izs2005a %D 2005 %X We study a polygonal decomposition of the 1-ring neighborhood of a quadrilateral mesh. This decomposition corresponds to the eigenvectors of a matrix with circulant blocks, thus, it is suitable for the study of subdivision schemes. First, we calculate the extent of the local mesh area we have to consider in order to get a geometrically meaningful decomposition. Then we concentrate on the Catmull-Clark scheme and decompose the 1-ring neighborhood into 2n planar 2n-gons, which under subdivision scheme transform into 4n planar n-gons coming in pairs of coplanar polygons and quadruples of parallel polygons. We calculate the eigenvalues and eigenvectors of the transformations of these configurations showing their relation with the tangent plane and the curvature properties of the subdivision surface. Using direct computations on circulant-block matrices we show how the same eigenvalues can be analytically deduced from the subdivision matrix. %J Computer Graphics & Geometry %V 7 %& 16 %P 16 - 30 %@ false
Isenburg, M., Ivrissimtzis, I., Gumhold, S., and Seidel, H.-P. 2005. Geometry Prediction for High Degree Polygons. SCCG ’05: Proceedings of the 21st Spring Conference on Computer Graphics, ACM.
Export
BibTeX
@inproceedings{Isenburg-et-al_SCCG05,
  TITLE     = {Geometry Prediction for High Degree Polygons},
  AUTHOR    = {Isenburg, Martin and Ivrissimtzis, Ioannis and Gumhold, Stefan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-204-4},
  DOI       = {10.1145/1090122.1090146},
  LOCALID   = {Local-ID: C125675300671F7B-F0F0DA1C5853C587C12571920044267B-iigs2005},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {SCCG '05: Proceedings of the 21st Spring Conference on Computer Graphics},
  PAGES     = {147--152},
  ADDRESS   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Isenburg, Martin %A Ivrissimtzis, Ioannis %A Gumhold, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Geometry Prediction for High Degree Polygons : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26AC-9 %F EDOC: 279014 %F OTHER: Local-ID: C125675300671F7B-F0F0DA1C5853C587C12571920044267B-iigs2005 %R 10.1145/1090122.1090146 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %B SCCG '05: Proceedings of the 21st Spring Conference on Computer Graphics %P 147 - 152 %I ACM %@ 978-1-59593-204-4
Havran, V., Smyk, M., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005a. Importance Sampling for Video Environment Maps. SIGGRAPH ’05: ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/HavranSKMS05,
  TITLE     = {Importance Sampling for Video Environment Maps},
  AUTHOR    = {Havran, Vlastimil and Smyk, Miloslaw and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-7827-7},
  DOI       = {10.1145/1187112.1187243},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches},
  EDITOR    = {Buhler, Juan},
  PAGES     = {109},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Smyk, Miloslaw %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Importance Sampling for Video Environment Maps : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3CD6-D %R 10.1145/1187112.1187243 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 109 %I ACM %@ 978-1-4503-7827-7
Havran, V., Smyk, M., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005b. Interactive System for Dynamic Scene Lighting using Captured Video Environment Maps. Rendering Techniques 2005: Eurographics Symposium on Rendering (EGSR 2005), Eurographics Association.
Abstract
We present an interactive system for fully dynamic scene lighting using <br>captured high dynamic range (HDR) video environment maps. The key component of <br>our system is an algorithm for efficient decomposition of HDR video environment <br>map captured over hemisphere into a set of representative directional light <br>sources, which can be used for the direct lighting computation with shadows <br>using graphics hardware. The resulting lights exhibit good temporal coherence <br>and their number can be adaptively changed to keep a constant framerate while <br>good spatial distribution (stratification) properties are maintained. We can <br>handle a large number of light sources with shadows using a novel technique <br>which reduces the cost of BRDF-based shading and visibility computations. We <br>demonstrate the use of our system in a mixed reality application in which real <br>and synthetic objects are illuminated by consistent lighting at interactive <br>framerates.
Export
BibTeX
@inproceedings{Havran-et-al_EGSR05.2,
  TITLE     = {Interactive System for Dynamic Scene Lighting using Captured Video Environment Maps},
  AUTHOR    = {Havran, Vlastimil and Smyk, Miloslaw and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-23-1},
  DOI       = {10.2312/EGWR/EGSR05/031-042},
  LOCALID   = {Local-ID: C125675300671F7B-C3468DABE0F8D837C12570B30047ED74-Havran2005egsrEM},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We present an interactive system for fully dynamic scene lighting using captured high dynamic range (HDR) video environment maps. The key component of our system is an algorithm for efficient decomposition of HDR video environment map captured over hemisphere into a set of representative directional light sources, which can be used for the direct lighting computation with shadows using graphics hardware. The resulting lights exhibit good temporal coherence and their number can be adaptively changed to keep a constant framerate while good spatial distribution (stratification) properties are maintained. We can handle a large number of light sources with shadows using a novel technique which reduces the cost of BRDF-based shading and visibility computations. We demonstrate the use of our system in a mixed reality application in which real and synthetic objects are illuminated by consistent lighting at interactive framerates.},
  BOOKTITLE = {Rendering Techniques 2005: Eurographics Symposium on Rendering (EGSR 2005)},
  EDITOR    = {Deussen, Oliver and Keller, Alexander and Bala, Kavita and Dutr{\'e}, Philip and Fellner, Dieter W. and Spencer, Stephen N.},
  PAGES     = {31--42, 311},
  ADDRESS   = {Konstanz, Germany},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Smyk, Miloslaw %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive System for Dynamic Scene Lighting using Captured Video Environment Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26DB-0 %F EDOC: 279016 %F OTHER: Local-ID: C125675300671F7B-C3468DABE0F8D837C12570B30047ED74-Havran2005egsrEM %R 10.2312/EGWR/EGSR05/031-042 %D 2005 %B 16th Eurographics Symposium on Rendering %Z date of event: 2005-06-29 - 2005-07-01 %C Konstanz, Germany %X We present an interactive system for fully dynamic scene lighting using <br>captured high dynamic range (HDR) video environment maps. The key component of <br>our system is an algorithm for efficient decomposition of HDR video environment <br>map captured over hemisphere into a set of representative directional light <br>sources, which can be used for the direct lighting computation with shadows <br>using graphics hardware. The resulting lights exhibit good temporal coherence <br>and their number can be adaptively changed to keep a constant framerate while <br>good spatial distribution (stratification) properties are maintained. We can <br>handle a large number of light sources with shadows using a novel technique <br>which reduces the cost of BRDF-based shading and visibility computations. We <br>demonstrate the use of our system in a mixed reality application in which real <br>and synthetic objects are illuminated by consistent lighting at interactive <br>framerates. %B Rendering Techniques 2005: Eurographics Symposium on Rendering %E Deussen, Oliver; Keller, Alexander; Bala, Kavita; Dutr&#233;, Philip; Fellner, Dieter W.; Spencer, Stephen N. 
%P 31 - 42, 311 %I Eurographics Association %@ 3-905673-23-1
Havran, V., Bittner, J., Herzog, R., and Seidel, H.-P. 2005c. Ray Maps for Global Illumination. Rendering Techniques 2005: Eurographics Symposium on Rendering, Eurographics Association.
Abstract
We describe a novel data structure for representing light transport called ray <br>map. The ray map extends the concept of photon maps: it stores not only photon <br>impacts but the whole photon paths. We demonstrate the utility of ray maps for <br>global illumination by eliminating boundary bias and reducing topological bias <br>of density estimation in global illumination. Thanks to the elimination of <br>boundary bias we could use ray maps for fast direct visualization with the <br>image quality being close to that obtained by the expensive final gathering <br>step. We describe in detail our implementation of the ray map using a lazily <br>constructed kD-tree. We also present several optimizations bringing the ray map <br>query performance close to the performance of the photon map.
Export
BibTeX
@inproceedings{Havran-et-al_EGSR05,
  TITLE     = {Ray Maps for Global Illumination},
  AUTHOR    = {Havran, Vlastimil and Bittner, Jiri and Herzog, Robert and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-23-1},
  DOI       = {10.2312/EGWR/EGSR05/043-054},
  LOCALID   = {Local-ID: C125675300671F7B-74BA222F1CD83A97C12570B30048FF26-Havran2005egsrRM},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We describe a novel data structure for representing light transport called ray map. The ray map extends the concept of photon maps: it stores not only photon impacts but the whole photon paths. We demonstrate the utility of ray maps for global illumination by eliminating boundary bias and reducing topological bias of density estimation in global illumination. Thanks to the elimination of boundary bias we could use ray maps for fast direct visualization with the image quality being close to that obtained by the expensive final gathering step. We describe in detail our implementation of the ray map using a lazily constructed kD-tree. We also present several optimizations bringing the ray map query performance close to the performance of the photon map.},
  BOOKTITLE = {Rendering Techniques 2005: Eurographics Symposium on Rendering},
  EDITOR    = {Deussen, Oliver and Keller, Alexander and Bala, Kavita and Dutr{\'e}, Philip and Fellner, Dieter W. and Spencer, Stephen N.},
  PAGES     = {43--54},
  ADDRESS   = {Konstanz, Germany},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Bittner, Jiri %A Herzog, Robert %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Ray Maps for Global Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-277D-B %F EDOC: 279020 %F OTHER: Local-ID: C125675300671F7B-74BA222F1CD83A97C12570B30048FF26-Havran2005egsrRM %R 10.2312/EGWR/EGSR05/043-054 %D 2005 %B 16th Eurographics Symposium on Rendering %Z date of event: 2005-06-29 - 2005-07-01 %C Konstanz, Germany %X We describe a novel data structure for representing light transport called ray <br>map. The ray map extends the concept of photon maps: it stores not only photon <br>impacts but the whole photon paths. We demonstrate the utility of ray maps for <br>global illumination by eliminating boundary bias and reducing topological bias <br>of density estimation in global illumination. Thanks to the elimination of <br>boundary bias we could use ray maps for fast direct visualization with the <br>image quality being close to that obtained by the expensive final gathering <br>step. We describe in detail our implementation of the ray map using a lazily <br>constructed kD-tree. We also present several optimizations bringing the ray map <br>query performance close to the performance of the photon map. %B Rendering Techniques 2005: Eurographics Symposium on Rendering %E Deussen, Oliver; Keller, Alexander; Bala, Kavita; Dutr&#233;, Philip; Fellner, Dieter W.; Spencer, Stephen N. %P 43 - 54 %I Eurographics Association %@ 3-905673-23-1
Havran, V., Herzog, R., and Seidel, H.-P. 2005d. Fast Final Gathering via Reverse Photon Mapping. Computer Graphics Forum, Blackwell.
Abstract
We present a new algorithm for computing indirect illumination based on density <br>estimation similarly to photon mapping. We accelerate the search for final <br>gathering by reorganizing the computation in the reverse order. We use two <br>trees that organize spatially not only the position of photons but also the <br>position of final gather rays. The achieved speedup is algorithmic, the <br>performance improvement takes advantage of logarithmic complexity of searching <br>in trees. The algorithm requires almost no user settings unlike many known <br>acceleration techniques for photon mapping. The image quality is the same as <br>for traditional photon mapping with final gathering, since the algorithm does <br>not approximate or interpolate. Optionally, the algorithm can be combined with <br>other techniques such as density control and importance sampling. The algorithm <br>creates a coherent access pattern to the main memory. This further improves on <br>performance and also allows us to use efficient external data structures to <br>alleviate the increased memory requirements.
Export
BibTeX
@inproceedings{Havran-et-al_EUROGRAPHICS05,
  TITLE     = {Fast Final Gathering via Reverse Photon Mapping},
  AUTHOR    = {Havran, Vlastimil and Herzog, Robert and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2005.00857.x},
  LOCALID   = {Local-ID: C125675300671F7B-0EECF60691827774C12570B3004AE164-Havran2005eg05},
  PUBLISHER = {Blackwell},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We present a new algorithm for computing indirect illumination based on density estimation similarly to photon mapping. We accelerate the search for final gathering by reorganizing the computation in the reverse order. We use two trees that organize spatially not only the position of photons but also the position of final gather rays. The achieved speedup is algorithmic, the performance improvement takes advantage of logarithmic complexity of searching in trees. The algorithm requires almost no user settings unlike many known acceleration techniques for photon mapping. The image quality is the same as for traditional photon mapping with final gathering, since the algorithm does not approximate or interpolate. Optionally, the algorithm can be combined with other techniques such as density control and importance sampling. The algorithm creates a coherent access pattern to the main memory. This further improves on performance and also allows us to use efficient external data structures to alleviate the increased memory requirements.},
  BOOKTITLE = {The European Association for Computer Graphics 26th Annual Conference: EUROGRAPHICS 2005},
  EDITOR    = {Alexa, Marc and Marks, Joe},
  PAGES     = {323--333},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {24},
  ISSUE     = {3},
  ADDRESS   = {Dublin, Ireland},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Herzog, Robert %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Final Gathering via Reverse Photon Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-268A-6 %F EDOC: 279019 %F OTHER: Local-ID: C125675300671F7B-0EECF60691827774C12570B3004AE164-Havran2005eg05 %R 10.1111/j.1467-8659.2005.00857.x %D 2005 %B The European Association for Computer Graphics 26th Annual Conference %Z date of event: 2005-08-29 - %C Dublin, Ireland %X We present a new algorithm for computing indirect illumination based on density <br>estimation similarly to photon mapping. We accelerate the search for final <br>gathering by reorganizing the computation in the reverse order. We use two <br>trees that organize spatially not only the position of photons but also the <br>position of final gather rays. The achieved speedup is algorithmic, the <br>performance improvement takes advantage of logarithmic complexity of searching <br>in trees. The algorithm requires almost no user settings unlike many known <br>acceleration techniques for photon mapping. The image quality is the same as <br>for traditional photon mapping with final gathering, since the algorithm does <br>not approximate or interpolate. Optionally, the algorithm can be combined with <br>other techniques such as density control and importance sampling. The algorithm <br>creates a coherent access pattern to the main memory. This further improves on <br>performance and also allows us to use efficient external data structures to <br>alleviate the increased memory requirements. 
%B The European Association for Computer Graphics 26th Annual Conference: EUROGRAPHICS 2005 %E Alexa, Marc; Marks, Joe %P 323 - 333 %I Blackwell %J Computer Graphics Forum %V 24 %N 3 %I Blackwell-Wiley %@ false
Havran, V., Neummann, A., Zotti, G., Purgathofer, W., and Seidel, H.-P. 2005e. On Cross-validation and Resampling of BRDF Data Measurements. SCCG ’05: Proceedings of the 21st spring conference on Computer graphics, ACM.
Abstract
We discuss the validation of BTF data measurements by means used for BRDF <br>measurements. First, we show how to apply the Helmholtz reciprocity and <br>isotropy for a single data set. Second, we discuss a cross-validation for BRDF <br>measurement data obtained from two different measurement setups, where the <br>measurements are not calibrated or the level of accuracy is not known. We show <br>the practical problems encountered and the solutions we have used to validate <br>physical setup for four material samples. We describe a novel coordinate system <br>suitable for resampling the BRDF data from one data set to another data set. <br>Further, we show how the perceptually uniform color space CIELab is used for <br>cross-comparison of BRDF data measurements, which were not calibrated.
Export
BibTeX
% NOTE(review): fixed author surname "Neummann" -> "Neumann" (Attila Neumann,
% TU Wien, co-author of Zotti/Purgathofer) — the double "m" was a typo.
% Entry reformatted one-field-per-line; all other field content unchanged.
@inproceedings{Havran-et-al_SCCG05,
  TITLE     = {On Cross-validation and Resampling of {BRDF} Data Measurements},
  AUTHOR    = {Havran, Vlastimil and Neumann, Attila and Zotti, Georg and Purgathofer, Werner and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-59593-204-4},
  DOI       = {10.1145/1090122.1090149},
  LOCALID   = {Local-ID: C125675300671F7B-FBC758B85F8A3975C12571920041861B-Havran2005SCCGz},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We discuss the validation of BTF data measurements by means used for BRDF <br>measurements. First, we show how to apply the Helmholtz reciprocity and <br>isotropy for a single data set. Second, we discuss a cross-validation for BRDF <br>measurement data obtained from two different measurement setups, where the <br>measurements are not calibrated or the level of accuracy is not known. We show <br>the practical problems encountered and the solutions we have used to validate <br>physical setup for four material samples. We describe a novel coordinate system <br>suitable for resampling the BRDF data from one data set to another data set. <br>Further, we show how the perceptually uniform color space CIELab is used for <br>cross-comparison of BRDF data measurements, which were not calibrated.},
  BOOKTITLE = {SCCG '05: Proceedings of the 21st spring conference on Computer graphics},
  PAGES     = {161--168},
  ADDRESS   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Neummann, Attila %A Zotti, Georg %A Purgathofer, Werner %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T On Cross-validation and Resampling of BRDF Data Measurements : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-272E-0 %F EDOC: 279018 %F OTHER: Local-ID: C125675300671F7B-FBC758B85F8A3975C12571920041861B-Havran2005SCCGz %R 10.1145/1090122.1090149 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %X We discuss the validation of BTF data measurements by means used for BRDF <br>measurements. First, we show how to apply the Helmholtz reciprocity and <br>isotropy for a single data set. Second, we discuss a cross-validation for BRDF <br>measurement data obtained from two different measurement setups, where the <br>measurements are not calibrated or the level of accuracy is not known. We show <br>the practical problems encountered and the solutions we have used to validate <br>physical setup for four material samples. We describe a novel coordinate system <br>suitable for resampling the BRDF data from one data set to another data set. <br>Further, we show how the perceptually uniform color space CIELab is used for <br>cross-comparison of BRDF data measurements, which were not calibrated. %B SCCG '05: Proceedings of the 21st spring conference on Computer graphics %P 161 - 168 %I ACM %@ 978-1-59593-204-4
Haber, J., Magnor, M., and Seidel, H.-P. 2005. Physically based Simulation of Twilight Phenomena. Transactions on Graphics24, 4.
Abstract
We present a physically based approach to compute the colors of the sky during the twilight period before sunrise and after sunset. The simulation is based on the theory of light scattering by small particles. A realistic atmosphere model is assumed, consisting of air molecules, aerosols, and water. Air density, aerosols, and relative humidity vary with altitude. In addition, the aerosol component varies in composition and particle size distribution. This allows us to realistically simulate twilight phenomena for a wide range of different climate conditions. Besides considering multiple Rayleigh and Mie scattering, we take into account wavelength-dependent refraction of direct sunlight as well as the shadow of the Earth. Incorporating several optimizations into the radiative transfer simulation, a photo-realistic hemispherical twilight sky is computed in less than two hours on a conventional PC. The resulting radiometric data is useful, for instance, for high-dynamic range environment mapping, outdoor global illumination calculations, mesopic vision research and optical aerosol load probing.
Export
BibTeX
% NOTE(review): completed journal name "Transactions on Graphics" ->
% "ACM Transactions on Graphics" (DOI 10.1145/1095878.1095884 resolves to
% ACM TOG 24(4); other TOG entries in this file store the full name).
% Entry reformatted one-field-per-line; all other field content unchanged.
@article{Haber:PBSTP,
  TITLE     = {Physically based Simulation of Twilight Phenomena},
  AUTHOR    = {Haber, J{\"o}rg and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  URL       = {http://portal.acm.org/citation.cfm?id=1095878.1095884},
  DOI       = {10.1145/1095878.1095884},
  LOCALID   = {Local-ID: C1256BDE005F57A8-8E73D8241FAFF20AC12570A7003C932E-Haber:PBSTP},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We present a physically based approach to compute the colors of the sky during the twilight period before sunrise and after sunset. The simulation is based on the theory of light scattering by small particles. A realistic atmosphere model is assumed, consisting of air molecules, aerosols, and water. Air density, aerosols, and relative humidity vary with altitude. In addition, the aerosol component varies in composition and particle size distribution. This allows us to realistically simulate twilight phenomena for a wide range of different climate conditions. Besides considering multiple Rayleigh and Mie scattering, we take into account wavelength-dependent refraction of direct sunlight as well as the shadow of the Earth. Incorporating several optimizations into the radiative transfer simulation, a photo-realistic hemispherical twilight sky is computed in less than two hours on a conventional PC. The resulting radiometric data is useful, for instance, for high-dynamic range environment mapping, outdoor global illumination calculations, mesopic vision research and optical aerosol load probing.},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {24},
  NUMBER    = {4},
  PAGES     = {1353--1373},
}
Endnote
%0 Journal Article %A Haber, J&#246;rg %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Physically based Simulation of Twilight Phenomena : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2880-A %F EDOC: 520732 %R 10.1145/1095878.1095884 %U http://portal.acm.org/citation.cfm?id=1095878.1095884 %F OTHER: Local-ID: C1256BDE005F57A8-8E73D8241FAFF20AC12570A7003C932E-Haber:PBSTP %7 2005 %D 2005 %* Review method: peer-reviewed %X We present a physically based approach to compute the colors of the sky during the twilight period before sunrise and after sunset. The simulation is based on the theory of light scattering by small particles. A realistic atmosphere model is assumed, consisting of air molecules, aerosols, and water. Air density, aerosols, and relative humidity vary with altitude. In addition, the aerosol component varies in composition and particle size distribution. This allows us to realistically simulate twilight phenomena for a wide range of different climate conditions. Besides considering multiple Rayleigh and Mie scattering, we take into account wavelength-dependent refraction of direct sunlight as well as the shadow of the Earth. Incorporating several optimizations into the radiative transfer simulation, a photo-realistic hemispherical twilight sky is computed in less than two hours on a conventional PC. The resulting radiometric data is useful, for instance, for high-dynamic range environment mapping, outdoor global illumination calculations, mesopic vision research and optical aerosol load probing. %J Transactions on Graphics %V 24 %N 4 %& 1353 %P 1353 - 1373 %I ACM %C New York, NY %@ false
Günther, J., Wald, I., and Seidel, H.-P. 2005a. Precomputed Light Sets for Fast High Quality Global Illumination. SIGGRAPH ’05: ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
% Reformatted one-field-per-line for readability; field content unchanged.
@inproceedings{DBLP:conf/siggraph/GuntherWS05,
  TITLE     = {Precomputed Light Sets for Fast High Quality Global Illumination},
  AUTHOR    = {G{\"u}nther, Johannes and Wald, Ingo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-7827-7},
  DOI       = {10.1145/1187112.1187242},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches},
  EDITOR    = {Buhler, Juan},
  PAGES     = {108},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A G&#252;nther, Johannes %A Wald, Ingo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Precomputed Light Sets for Fast High Quality Global Illumination : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3CDC-7 %R 10.1145/1187112.1187242 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 108 %I ACM %@ 978-1-4503-7827-7
Günther, J., Chen, T., Goesele, M., Wald, I., and Seidel, H.-P. 2005b. Efficient Acquisition and Realistic Rendering of Car Paint. Vision, Modeling, and Visualization 2005 (VMV 2005), Akademische Verlagsgesellschaft Aka.
Abstract
The outside appearance of cars is mostly defined through only two distinct materials -- glass and car paint. While glass can rather easily be simulated by the simple physical laws of reflection and refraction, modeling car paint is more challenging. In this paper we present a framework for the efficient acquisition and realistic rendering of real-world car paint. This is achieved by building an easy-to-reproduce measuring setup, fitting the measured data to a general BRDF model for car paint, adding a component for simulating the sparkling effect of metallic paints, and rendering using a specially designed shader in a realtime ray tracer.
Export
BibTeX
% Reformatted one-field-per-line for readability; field content unchanged.
@inproceedings{Guenther:2005:EAR,
  TITLE     = {Efficient Acquisition and Realistic Rendering of Car Paint},
  AUTHOR    = {G{\"u}nther, Johannes and Chen, Tongbo and Goesele, Michael and Wald, Ingo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-068-8},
  LOCALID   = {Local-ID: C125675300671F7B-1A606C7B8BE90575C1257045002F16DA-Guenther:2005:EAR},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {The outside appearance of cars is mostly defined through only two distinct materials -- glass and car paint. While glass can rather easily be simulated by the simple physical laws of reflection and refraction, modeling car paint is more challenging. In this paper we present a framework for the efficient acquisition and realistic rendering of real-world car paint. This is achieved by building an easy-to-reproduce measuring setup, fitting the measured data to a general BRDF model for car paint, adding a component for simulating the sparkling effect of metallic paints, and rendering using a specially designed shader in a realtime ray tracer.},
  BOOKTITLE = {Vision, Modeling, and Visualization 2005 (VMV 2005)},
  EDITOR    = {Greiner, G{\"u}nther and Hornegger, Joachim and Niemann, Heinrich and Stamminger, Marc},
  PAGES     = {487--494},
  ADDRESS   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A G&#252;nther, Johannes %A Chen, Tongbo %A Goesele, Michael %A Wald, Ingo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Acquisition and Realistic Rendering of Car Paint : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2656-B %F EDOC: 279023 %F OTHER: Local-ID: C125675300671F7B-1A606C7B8BE90575C1257045002F16DA-Guenther:2005:EAR %D 2005 %B Vision, Modeling, and Visualization 2005 %Z date of event: 2005-11-16 - 2005-11-18 %C Erlangen, Germany %X The outside appearance of cars is mostly defined through only two distinct materials -- glass and car paint. While glass can rather easily be simulated by the simple physical laws of reflection and refraction, modeling car paint is more challenging. In this paper we present a framework for the efficient acquisition and realistic rendering of real-world car paint. This is achieved by building an easy-to-reproduce measuring setup, fitting the measured data to a general BRDF model for car paint, adding a component for simulating the sparkling effect of metallic paints, and rendering using a specially designed shader in a realtime ray tracer. %B Vision, Modeling, and Visualization 2005 %E Greiner, G&#252;nther; Hornegger, Joachim; Niemann, Heinrich; Stamminger, Marc %P 487 - 494 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-068-8
Gumhold, S., Karni, Z., Isenburg, M., and Seidel, H.-P. 2005a. Predictive Point-cloud Compression. ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
% Reformatted one-field-per-line for readability; field content unchanged.
@inproceedings{Gumhold-sig05s,
  TITLE     = {Predictive Point-cloud Compression},
  AUTHOR    = {Gumhold, Stefan and Karni, Zachi and Isenburg, Martin and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-7827-7},
  DOI       = {10.1145/1187112.1187277},
  PUBLISHER = {ACM},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {ACM SIGGRAPH 2005 Sketches},
  EDITOR    = {Buhler, Juan},
  PAGES     = {137},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Gumhold, Stefan %A Karni, Zachi %A Isenburg, Martin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Predictive Point-cloud Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E6D5-0 %R 10.1145/1187112.1187277 %D 2005 %B ACM SIGGRAPH 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 137 %I ACM %@ 978-1-4503-7827-7
Gumhold, S., Karni, Z., Isenburg, M., and Seidel, H.-P. 2005b. Predictive Point-cloud Compression. The Sixth Israel-Korea Bi-National Conference on New Technologies and Visualization Methods for Product Development, Technion.
Export
BibTeX
% Reformatted one-field-per-line for readability; field content unchanged.
@inproceedings{Gumhold-ik05,
  TITLE     = {Predictive Point-cloud Compression},
  AUTHOR    = {Gumhold, Stefan and Karni, Zachi and Isenburg, Martin and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  PUBLISHER = {Technion},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {The Sixth Israel-Korea Bi-National Conference on New Technologies and Visualization Methods for Product Development},
  PAGES     = {125--129},
  ADDRESS   = {Haifa, Israel},
}
Endnote
%0 Conference Proceedings %A Gumhold, Stefan %A Karni, Zachi %A Isenburg, Martin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Predictive Point-cloud Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E6DD-F %D 2005 %B The Sixth Israel-Korea Bi-National Conference on New Technologies and Visualization Methods for Product Development %Z date of event: 2005-11-08 - 2005-11-09 %C Haifa, Israel %B The Sixth Israel-Korea Bi-National Conference on New Technologies and Visualization Methods for Product Development %P 125 - 129 %I Technion
Greeff, M., Haber, J., and Seidel, H.-P. 2005. Nailing and Pinning: Adding Constraints to Inverse Kinematics. The 13-th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision 2005 in co-operation with EUROGRAPHICS WSCG 2005 ; Short Papers, UNION Agency.
Abstract
Inverse kinematics is commonly applied to compute the resulting movement of an<br> avatar for a prescribed target pose. The motion path computed by<br> inverse kinematics, however, often differs from the expected or desired<br> result due to an underconstrained parameter space of the degrees-of-freedom<br> of all joints. In such cases, it is necessary to introduce additional<br> constraints, for instance by locking a joint's position and/or rotation.<br> We present a method to fix a joint in terms of position and/or rotation <br> and explain how to incorporate these constraints into the inverse<br> kinematics solution.
Export
BibTeX
% Reformatted one-field-per-line for readability; field content unchanged.
@inproceedings{Greeff2005a,
  TITLE     = {Nailing and Pinning: Adding Constraints to Inverse Kinematics},
  AUTHOR    = {Greeff, Mard{\'e} and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {80-903100-9-5},
  LOCALID   = {Local-ID: C125675300671F7B-C3AAF6A50EE2A0E0C1256FAF0041E58A-Greeff2005a},
  PUBLISHER = {UNION Agency},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {Inverse kinematics is commonly applied to compute the resulting movement of an<br> avatar for a prescribed target pose. The motion path computed by<br> inverse kinematics, however, often differs from the expected or desired<br> result due to an underconstrained parameter space of the degrees-of-freedom<br> of all joints. In such cases, it is necessary to introduce additional<br> constraints, for instance by locking a joint's position and/or rotation.<br> We present a method to fix a joint in terms of position and/or rotation <br> and explain how to incorporate these constraints into the inverse<br> kinematics solution.},
  BOOKTITLE = {The 13-th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision 2005 in co-operation with EUROGRAPHICS WSCG 2005 ; Short Papers},
  EDITOR    = {Skala, Vaclav},
  PAGES     = {125--128},
  SERIES    = {Short Paper Proceedings},
  ADDRESS   = {Plzen, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Greeff, Mard&#233; %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Nailing and Pinning: Adding Constraints to Inverse Kinematics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2723-6 %F EDOC: 278949 %F OTHER: Local-ID: C125675300671F7B-C3AAF6A50EE2A0E0C1256FAF0041E58A-Greeff2005a %D 2005 %B The 13-th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision %Z date of event: 2005-01-31 - 2005-02-04 %C Plzen, Czech Republic %X Inverse kinematics is commonly applied to compute the resulting movement of an<br> avatar for a prescribed target pose. The motion path computed by<br> inverse kinematics, however, often differs from the expected or desired<br> result due to an underconstrained parameter space of the degrees-of-freedom<br> of all joints. In such cases, it is necessary to introduce additional<br> constraints, for instance by locking a joint's position and/or rotation.<br> We present a method to fix a joint in terms of position and/or rotation <br> and explain how to incorporate these constraints into the inverse<br> kinematics solution. %B The 13-th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision 2005 in co-operation with EUROGRAPHICS WSCG 2005 ; Short Papers %E Skala, Vaclav %P 125 - 128 %I UNION Agency %@ 80-903100-9-5 %B Short Paper Proceedings
Galic, I., Weickert, J., Welk, M., Bruhn, A., Belyaev, A., and Seidel, H.-P. 2005. Towards PDE-based Image Compression. Variational, Geometric, and Level Set Methods in Computer Vision (VLSM 2005), Springer.
Export
BibTeX
% NOTE(review): restored diacritics per this file's own accent convention
% (cf. {\"u} in neighbouring entries): "Bruhn, Andres" -> "Bruhn, Andr{\'e}s"
% and editor "Schnoerr" -> "Schn{\"o}rr" (both per their DBLP author pages).
% "Galic" may likewise be "Gali{\'c}" — verify against the paper byline before
% changing, so left as-is. Entry reformatted; other field content unchanged.
@inproceedings{Galic-et-al_VLSM05,
  TITLE     = {Towards {PDE}-based Image Compression},
  AUTHOR    = {Galic, Irena and Weickert, Joachim and Welk, Martin and Bruhn, Andr{\'e}s and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-540-29348-5},
  DOI       = {10.1007/11567646_4},
  LOCALID   = {Local-ID: C125675300671F7B-CD07401B55F47EBAC12570F8003BF5F5-Belyaev_vlsm05},
  PUBLISHER = {Springer},
  YEAR      = {2005},
  DATE      = {2005},
  BOOKTITLE = {Variational, Geometric, and Level Set Methods in Computer Vision (VLSM 2005)},
  EDITOR    = {Paragios, Nikos and Faugeras, Olivier and Chan, Tony and Schn{\"o}rr, Christoph},
  PAGES     = {37--48},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {3752},
  ADDRESS   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Galic, Irena %A Weickert, Joachim %A Welk, Martin %A Bruhn, Andres %A Belyaev, Alexander %A Seidel, Hans-Peter %+ International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards PDE-based Image Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2817-9 %F EDOC: 279026 %F OTHER: Local-ID: C125675300671F7B-CD07401B55F47EBAC12570F8003BF5F5-Belyaev_vlsm05 %R 10.1007/11567646_4 %D 2005 %B Third International Workshop on Variational, Geometric, and Level Set Methods in Computer Vision %Z date of event: 2005-10-16 - %C Beijing, China %B Variational, Geometric, and Level Set Methods in Computer Vision %E Paragios, Nikos; Faugeras, Olivier; Chan, Tony; Schnoerr, Christoph %P 37 - 48 %I Springer %@ 3-540-29348-5 %B Lecture Notes in Computer Science %N 3752 %U https://rdcu.be/dGwUL
Fuchs, M., Blanz, V., and Seidel, H.-P. 2005a. Bayesian Relighting. Rendering Techniques 2005: Eurographics Symposium on Rendering Techniques (EGSR 2005), Eurographics Association.
Abstract
We present a simple method for relighting real objects viewed from a <br> fixed camera position. Instead of setting up a calibrated<br> measurement device, such as a light stage, we manually sweep a spotlight<br> over the walls of a white room, illuminating the object indirectly.<br> In contrast<br> to previous methods, we use arbitrary and unknown angular distributions of<br> incoming light. Neither the incident light nor the reflectance function<br> need to be represented explicitly in our approach.\\<br>%<br> The new method relies on images of a probe object, for instance a black<br> snooker ball, placed near the target object. Pictures of the probe in a novel<br> illumination are decomposed into a linear combination of measured images of<br> the probe. Then, a linear combination of images of the target object with the<br> same coefficients produces a synthetic image with the new illumination. We<br> use a simple Bayesian approach to find the most plausible output image, given<br> the picture of the probe and the statistics<br> observed in the dataset of samples.\\<br>%<br> Our results for a variety of novel illuminations, including synthetic lighting<br> by relatively narrow light sources as well as natural illuminations, <br> demonstrate that the new technique is a<br> useful, low cost alternative to existing techniques for a broad range of<br> objects and materials.
Export
BibTeX
% Reformatted one-field-per-line for readability; the EDITOR value, which was
% wrapped across a raw line break mid-field, is rejoined (BibTeX collapses
% field-internal whitespace, so content is unchanged). The abstract is kept
% byte-for-byte, including its export artifacts ("\\<br>%<br>").
@inproceedings{Fuchs-et-al_EGSR05,
  TITLE     = {Bayesian Relighting},
  AUTHOR    = {Fuchs, Martin and Blanz, Volker and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-23-1},
  DOI       = {10.2312/EGWR/EGSR05/157-164},
  LOCALID   = {Local-ID: C125675300671F7B-C4B00B3891C1EEE0C125704A007977D5-Fuchs2005b},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2005},
  DATE      = {2005},
  ABSTRACT  = {We present a simple method for relighting real objects viewed from a <br> fixed camera position. Instead of setting up a calibrated<br> measurement device, such as a light stage, we manually sweep a spotlight<br> over the walls of a white room, illuminating the object indirectly.<br> In contrast<br> to previous methods, we use arbitrary and unknown angular distributions of<br> incoming light. Neither the incident light nor the reflectance function<br> need to be represented explicitly in our approach.\\<br>%<br> The new method relies on images of a probe object, for instance a black<br> snooker ball, placed near the target object. Pictures of the probe in a novel<br> illumination are decomposed into a linear combination of measured images of<br> the probe. Then, a linear combination of images of the target object with the<br> same coefficients produces a synthetic image with the new illumination. We<br> use a simple Bayesian approach to find the most plausible output image, given<br> the picture of the probe and the statistics<br> observed in the dataset of samples.\\<br>%<br> Our results for a variety of novel illuminations, including synthetic lighting<br> by relatively narrow light sources as well as natural illuminations, <br> demonstrate that the new technique is a<br> useful, low cost alternative to existing techniques for a broad range of<br> objects and materials.},
  BOOKTITLE = {Rendering Techniques 2005: Eurographics Symposium on Rendering Techniques (EGSR 2005)},
  EDITOR    = {Deussen, Oliver and Keller, Alexander and Bala, Kavita and Dutr{\'e}, Philip and Fellner, Dieter W. and Spencer, Stephen N.},
  PAGES     = {157--164},
  ADDRESS   = {Konstanz, Germany},
}
Endnote
%0 Conference Proceedings %A Fuchs, Martin %A Blanz, Volker %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Bayesian Relighting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-25ED-0 %F EDOC: 279055 %F OTHER: Local-ID: C125675300671F7B-C4B00B3891C1EEE0C125704A007977D5-Fuchs2005b %R 10.2312/EGWR/EGSR05/157-164 %D 2005 %B 16th Eurographics Symposium on Rendering Techniques %Z date of event: 2005-06-29 - 2005-07-01 %C Konstanz, Germany %X We present a simple method for relighting real objects viewed from a <br> fixed camera position. Instead of setting up a calibrated<br> measurement device, such as a light stage, we manually sweep a spotlight<br> over the walls of a white room, illuminating the object indirectly.<br> In contrast<br> to previous methods, we use arbitrary and unknown angular distributions of<br> incoming light. Neither the incident light nor the reflectance function<br> need to be represented explicitly in our approach.\\<br>%<br> The new method relies on images of a probe object, for instance a black<br> snooker ball, placed near the target object. Pictures of the probe in a novel<br> illumination are decomposed into a linear combination of measured images of<br> the probe. Then, a linear combination of images of the target object with the<br> same coefficients produces a synthetic image with the new illumination. 
We<br> use a simple Bayesian approach to find the most plausible output image, given<br> the picture of the probe and the statistics<br> observed in the dataset of samples.\\<br>%<br> Our results for a variety of novel illuminations, including synthetic lighting<br> by relatively narrow light sources as well as natural illuminations, <br> demonstrate that the new technique is a<br> useful, low cost alternative to existing techniques for a broad range of<br> objects and materials. %B Rendering Techniques 2005: Eurographics Symposium on Rendering Techniques %E Deussen, Oliver; Keller, Alexander; Bala, Kavita; Dutr&#233;, Philip; Fellner, Dieter W.; Spencer, Stephen N. %P 157 - 164 %I Eurographics Association %@ 3-905673-23-1
Fuchs, M., Blanz, V., Lensch, H.P.A., and Seidel, H.-P. 2005b. Reflectance from Images: A Model-Based Approach for Human Faces. IEEE Transactions on Visualization and Computer Graphics11, 3.
Abstract
In this paper, we present an image-based framework that acquires<br> the reflectance properties of a human face. <br> A range scan of the face is not required.<br> Based on a morphable face model, the system estimates the 3D<br> shape, and establishes point-to-point correspondence across images taken from<br> different viewpoints, and across different individuals' faces.<br>% <br> This provides a common parameterization of all reconstructed surfaces that can<br> be used to compare and transfer BRDF data between different faces. Shape<br> estimation from images compensates deformations of the face during the<br> measurement process, such as facial expressions.<br> <br> In the common parameterization, regions of homogeneous materials on the face<br> surface can be defined a-priori. We apply analytical BRDF models to express<br> the reflectance properties of each region, and we estimate their parameters in<br> a least-squares fit from the image data. For each of the surface points, the<br> diffuse component of the BRDF is locally refined, which provides high detail.<br>% <br> We present results for multiple analytical BRDF models, rendered at novel<br> orientations and lighting conditions.
Export
BibTeX
@article{Fuchs-et-al_TVCG05,
  title     = {Reflectance from Images: A Model-Based Approach for Human Faces},
  author    = {Fuchs, Martin and Blanz, Volker and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2005.47},
  localid   = {Local-ID: C125675300671F7B-40B8185E1038C225C1256FAF003BD72B-Fuchs2005a},
  publisher = {IEEE Computer Society},
  address   = {New York, NY},
  year      = {2005},
  date      = {2005},
  abstract  = {In this paper, we present an image-based framework that acquires<br> the reflectance properties of a human face. <br> A range scan of the face is not required.<br> Based on a morphable face model, the system estimates the 3D<br> shape, and establishes point-to-point correspondence across images taken from<br> different viewpoints, and across different individuals' faces.<br>% <br> This provides a common parameterization of all reconstructed surfaces that can<br> be used to compare and transfer BRDF data between different faces. Shape<br> estimation from images compensates deformations of the face during the<br> measurement process, such as facial expressions.<br> <br> In the common parameterization, regions of homogeneous materials on the face<br> surface can be defined a-priori. We apply analytical BRDF models to express<br> the reflectance properties of each region, and we estimate their parameters in<br> a least-squares fit from the image data. For each of the surface points, the<br> diffuse component of the BRDF is locally refined, which provides high detail.<br>% <br> We present results for multiple analytical BRDF models, rendered at novel<br> orientations and lighting conditions.},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {11},
  number    = {3},
  pages     = {296--305},
}
Endnote
%0 Journal Article %A Fuchs, Martin %A Blanz, Volker %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Reflectance from Images: A Model-Based Approach for Human Faces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2784-A %F EDOC: 279056 %F OTHER: Local-ID: C125675300671F7B-40B8185E1038C225C1256FAF003BD72B-Fuchs2005a %R 10.1109/TVCG.2005.47 %D 2005 %* Review method: peer-reviewed %X In this paper, we present an image-based framework that acquires<br> the reflectance properties of a human face. <br> A range scan of the face is not required.<br> Based on a morphable face model, the system estimates the 3D<br> shape, and establishes point-to-point correspondence across images taken from<br> different viewpoints, and across different individuals' faces.<br>% <br> This provides a common parameterization of all reconstructed surfaces that can<br> be used to compare and transfer BRDF data between different faces. Shape<br> estimation from images compensates deformations of the face during the<br> measurement process, such as facial expressions.<br> <br> In the common parameterization, regions of homogeneous materials on the face<br> surface can be defined a-priori. We apply analytical BRDF models to express<br> the reflectance properties of each region, and we estimate their parameters in<br> a least-squares fit from the image data. For each of the surface points, the<br> diffuse component of the BRDF is locally refined, which provides high detail.<br>% <br> We present results for multiple analytical BRDF models, rendered at novel<br> orientations and lighting conditions. %J IEEE Transactions on Visualization and Computer Graphics %V 11 %N 3 %& 296 %P 296 - 305 %I IEEE Computer Society %C New York, NY %@ false
Fuchs, M., Blanz, V., Lensch, H.P.A., and Seidel, H.-P. 2005c. Reflectance from images: a model-based approach for human faces. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this paper, we present an image-based framework that acquires the reflectance properties of a human face. A range scan of the face is not required. Based on a morphable face model, the system estimates the 3D shape, and establishes point-to-point correspondence across images taken from different viewpoints, and across different individuals' faces. This provides a common parameterization of all reconstructed surfaces that can be used to compare and transfer BRDF data between different faces. Shape estimation from images compensates deformations of the face during the measurement process, such as facial expressions. In the common parameterization, regions of homogeneous materials on the face surface can be defined a-priori. We apply analytical BRDF models to express the reflectance properties of each region, and we estimate their parameters in a least-squares fit from the image data. For each of the surface points, the diffuse component of the BRDF is locally refined, which provides high detail. We present results for multiple analytical BRDF models, rendered at novel orientations and lighting conditions.
Export
BibTeX
@techreport{FuchsBlanzLenschSeidel2005,
  title       = {Reflectance from images: a model-based approach for human faces},
  author      = {Fuchs, Martin and Blanz, Volker and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  language    = {eng},
  url         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-001},
  number      = {MPI-I-2005-4-001},
  institution = {Max-Planck-Institut f{\"u}r Informatik},
  address     = {Saarbr{\"u}cken},
  year        = {2005},
  date        = {2005},
  abstract    = {In this paper, we present an image-based framework that acquires the reflectance properties of a human face. A range scan of the face is not required. Based on a morphable face model, the system estimates the 3D shape, and establishes point-to-point correspondence across images taken from different viewpoints, and across different individuals' faces. This provides a common parameterization of all reconstructed surfaces that can be used to compare and transfer BRDF data between different faces. Shape estimation from images compensates deformations of the face during the measurement process, such as facial expressions. In the common parameterization, regions of homogeneous materials on the face surface can be defined a-priori. We apply analytical BRDF models to express the reflectance properties of each region, and we estimate their parameters in a least-squares fit from the image data. For each of the surface points, the diffuse component of the BRDF is locally refined, which provides high detail. We present results for multiple analytical BRDF models, rendered at novel orientations and lighting conditions.},
  type        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Fuchs, Martin %A Blanz, Volker %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Reflectance from images: a model-based approach for human faces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-683F-C %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-001 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2005 %P 33 p. %X In this paper, we present an image-based framework that acquires the reflectance properties of a human face. A range scan of the face is not required. Based on a morphable face model, the system estimates the 3D shape, and establishes point-to-point correspondence across images taken from different viewpoints, and across different individuals' faces. This provides a common parameterization of all reconstructed surfaces that can be used to compare and transfer BRDF data between different faces. Shape estimation from images compensates deformations of the face during the measurement process, such as facial expressions. In the common parameterization, regions of homogeneous materials on the face surface can be defined a-priori. We apply analytical BRDF models to express the reflectance properties of each region, and we estimate their parameters in a least-squares fit from the image data. For each of the surface points, the diffuse component of the BRDF is locally refined, which provides high detail. We present results for multiple analytical BRDF models, rendered at novel orientations and lighting conditions. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Fuchs, C., Goesele, M., Chen, T., and Seidel, H.-P. 2005d. An Empirical Model for Heterogeneous Translucent Objects. SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
@inproceedings{Fuchs-et-al_SIGGRAPH05,
  title     = {An Empirical Model for Heterogeneous Translucent Objects},
  author    = {Fuchs, Christian and Goesele, Michael and Chen, Tongbo and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4503-7827-7},
  doi       = {10.1145/1187112.1187139},
  localid   = {Local-ID: C125675300671F7B-D546DA4BEA221DEDC12570030043F157-Fuchs:2005:EMH},
  publisher = {ACM},
  year      = {2005},
  date      = {2005},
  booktitle = {SIGGRAPH 2005 Sketches},
  editor    = {Buhler, Juan},
  pages     = {24},
  address   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Fuchs, Christian %A Goesele, Michael %A Chen, Tongbo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Empirical Model for Heterogeneous Translucent Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-25A5-0 %F EDOC: 279057 %F OTHER: Local-ID: C125675300671F7B-D546DA4BEA221DEDC12570030043F157-Fuchs:2005:EMH %R 10.1145/1187112.1187139 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH 2005 Sketches %E Buhler, Juan %P 24 %I ACM %@ 978-1-4503-7827-7
Fuchs, C., Gösele, M., Chen, T., and Seidel, H.-P. 2005e. An emperical model for heterogeneous translucent objects. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We introduce an empirical model for multiple scattering in heterogeneous translucent objects for which classical approximations such as the dipole approximation to the diffusion equation are no longer valid. Motivated by the exponential fall-off of scattered intensity with distance, diffuse subsurface scattering is represented as a sum of exponentials per surface point plus a modulation texture. Modeling quality can be improved by using an anisotropic model where exponential parameters are determined per surface location and scattering direction. We validate the scattering model for a set of planar object samples which were recorded under controlled conditions and quantify the modeling error. Furthermore, several translucent objects with complex geometry are captured and compared to the real object under similar illumination conditions.
Export
BibTeX
@techreport{FuchsGoeseleChenSeidel,
  title       = {An emperical model for heterogeneous translucent objects},
  author      = {Fuchs, Christian and G{\"o}sele, Michael and Chen, Tongbo and Seidel, Hans-Peter},
  language    = {eng},
  url         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-006},
  number      = {MPI-I-2005-4-006},
  institution = {Max-Planck-Institut f{\"u}r Informatik},
  address     = {Saarbr{\"u}cken},
  year        = {2005},
  date        = {2005},
  abstract    = {We introduce an empirical model for multiple scattering in heterogeneous translucent objects for which classical approximations such as the dipole approximation to the diffusion equation are no longer valid. Motivated by the exponential fall-off of scattered intensity with distance, diffuse subsurface scattering is represented as a sum of exponentials per surface point plus a modulation texture. Modeling quality can be improved by using an anisotropic model where exponential parameters are determined per surface location and scattering direction. We validate the scattering model for a set of planar object samples which were recorded under controlled conditions and quantify the modeling error. Furthermore, several translucent objects with complex geometry are captured and compared to the real object under similar illumination conditions.},
  type        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Fuchs, Christian %A G&#246;sele, Michael %A Chen, Tongbo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An emperical model for heterogeneous translucent objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-682F-0 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2005-4-006 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2005 %P 20 p. %X We introduce an empirical model for multiple scattering in heterogeneous translucent objects for which classical approximations such as the dipole approximation to the diffusion equation are no longer valid. Motivated by the exponential fall-off of scattered intensity with distance, diffuse subsurface scattering is represented as a sum of exponentials per surface point plus a modulation texture. Modeling quality can be improved by using an anisotropic model where exponential parameters are determined per surface location and scattering direction. We validate the scattering model for a set of planar object samples which were recorded under controlled conditions and quantify the modeling error. Furthermore, several translucent objects with complex geometry are captured and compared to the real object under similar illumination conditions. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Fenchel, M., Gumhold, S., and Seidel, H.-P. 2005. Dynamic Surface Reconstruction from 4D-MR Images. Vision, Modeling, and Visualization 2005 (VMV 2005), Akademische Verlagsgesellschaft Aka.
Abstract
In this work we propose a novel approach for realistic fire animation and manipulation. We apply a statistical learning method to an image sequence of a real-world flame to jointly capture flame motion and appearance characteristics. A low-dimensional generic flame model is then robustly matched to the video images. The model parameter values are used as input to drive an Expectation-Maximization algorithm to learn an {\em auto regressive process} with respect to flame dynamics. The generic flame model and the trained motion model enable us to synthesize new, unique flame sequences of arbitrary length in real-time.
Export
BibTeX
@inproceedings{Fenchel_VMV05,
  title     = {Dynamic Surface Reconstruction from {4D-MR} Images},
  author    = {Fenchel, Matthias and Gumhold, Stefan and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-89838-068-8},
  publisher = {Akademische Verlagsgesellschaft Aka},
  year      = {2005},
  date      = {2005},
  booktitle = {Vision, Modeling, and Visualization 2005 (VMV 2005)},
  editor    = {Greiner, G{\"u}nther and Hornegger, Joachim and Niemann, Heinrich and Stamminger, Marc},
  pages     = {249--256},
  address   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Fenchel, Matthias %A Gumhold, Stefan %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Surface Reconstruction from 4D-MR Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-E6EB-F %D 2005 %B Vision, Modeling, and Visualization 2005 %Z date of event: 2005-11-16 - 2005-11-18 %C Erlangen, Germany %X In this work we propose a novel approach for realistic fire animation and manipulation. We apply a statistical learning method to an image sequence of a real-world flame to jointly capture flame motion and appearance characteristics. A low-dimensional generic flame model is then robustly matched to the video images. The model parameter values are used as input to drive an Expectation-Maximization algorithm to learn an {\em auto regressive process} with respect to flame dynamics. The generic flame model and the trained motion model enable us to synthesize new, unique flame sequences of arbitrary length in real-time. %B Vision, Modeling, and Visualization 2005 %E Greiner, G&#252;nther; Hornegger, Joachim; Niemann, Heinrich; Stamminger, Marc %P 249 - 256 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-068-8
Efremov, A., Havran, V., and Seidel, H.-P. 2005a. Robust and Numerically Stable Bézier Clipping Method for Ray Tracing NURBS Surfaces. SCCG ’05: Proceedings of the 21st Spring Conference on Computer Graphics, ACM.
Abstract
Raytracing has become a popular method for generating realistic images and <br>movies. The progress in hardware development shows that the real time <br>raytracing on a single PC might be possible in the ongoing future. Obviously, <br>that new generation of raytracing based applications will require more <br>visualization precision and flexibility. Most of the modern raytracing based <br>applications only deal with triangles as basic primitives, which brings <br>limitations to an application and may cause visual artifacts to appear. NURBS <br>surface representation is common for most of 3D modeling tools because of its <br>compactness and useful geometric properties of NURBS surfaces. Using the direct <br>raytracing NURBS surfaces, one can achieve better quality of rendered images. <br>Although, many such approaches have already been presented, almost all of them <br>suffer from numerical problems or do not work in some special cases. This paper <br>presents a modified Bézier clipping method for finding ray - NURBS surface <br>intersection points, which is fast, robust, and numerically stable.
Export
BibTeX
@inproceedings{Efremov-et-al_SCCG05,
  title     = {Robust and Numerically Stable B{\'e}zier Clipping Method for Ray Tracing {NURBS} Surfaces},
  author    = {Efremov, Alexander and Havran, Vlastimil and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-59593-204-4},
  doi       = {10.1145/1090122.1090144},
  localid   = {Local-ID: C125675300671F7B-C96DF63B5C5B2562C12570B300466F24-Efremov2005SCCG},
  publisher = {ACM},
  year      = {2005},
  date      = {2005},
  abstract  = {Raytracing has become a popular method for generating realistic images and <br>movies. The progress in hardware development shows that the real time <br>raytracing on a single PC might be possible in the ongoing future. Obviously, <br>that new generation of raytracing based applications will require more <br>visualization precision and flexibility. Most of the modern raytracing based <br>applications only deal with triangles as basic primitives, which brings <br>limitations to an application and may cause visual artifacts to appear. NURBS <br>surface representation is common for most of 3D modeling tools because of its <br>compactness and useful geometric properties of NURBS surfaces. Using the direct <br>raytracing NURBS surfaces, one can achieve better quality of rendered images. <br>Although, many such approaches have already been presented, almost all of them <br>suffer from numerical problems or do not work in some special cases. This paper <br>presents a modified B{\'e}zier clipping method for finding ray -- NURBS surface <br>intersection points, which is fast, robust, and numerically stable.},
  booktitle = {SCCG '05: Proceedings of the 21st Spring Conference on Computer Graphics},
  pages     = {127--135},
  address   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Efremov, Alexander %A Havran, Vlastimil %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Robust and Numerically Stable B&#233;zier Clipping Method for Ray Tracing NURBS Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2794-6 %F EDOC: 279029 %F OTHER: Local-ID: C125675300671F7B-C96DF63B5C5B2562C12570B300466F24-Efremov2005SCCG %R 10.1145/1090122.1090144 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %X Raytracing has become a popular method for generating realistic images and <br>movies. The progress in hardware development shows that the real time <br>raytracing on a single PC might be possible in the ongoing future. Obviously, <br>that new generation of raytracing based applications will require more <br>visualization precision and flexibility. Most of the modern raytracing based <br>applications only deal with triangles as basic primitives, which brings <br>limitations to an application and may cause visual artifacts to appear. NURBS <br>surface representation is common for most of 3D modeling tools because of its <br>compactness and useful geometric properties of NURBS surfaces. Using the direct <br>raytracing NURBS surfaces, one can achieve better quality of rendered images. <br>Although, many such approaches have already been presented, almost all of them <br>suffer from numerical problems or do not work in some special cases. This paper <br>presents a modified B&#233;zier clipping method for finding ray - NURBS surface <br>intersection points, which is fast, robust, and numerically stable. %B SCCG '05: Proceedings of the 21st Spring Conference on Computer Graphics %P 127 - 135 %I ACM %@ 978-1-59593-204-4
Efremov, A., Havran, V., and Seidel, H.-P. 2005b. Robust and Numerically Stable Bezier Clipping Method for Ray Tracing NURBS Surfaces. 21st Spring Conference on Computer Graphics (SCCG 2005), Comenius University.
Export
BibTeX
@inproceedings{Efremov2005SCCGz,
  title     = {Robust and Numerically Stable Bezier Clipping Method for Ray Tracing {NURBS} Surfaces},
  author    = {Efremov, Alexander and Havran, Vlastimil and Seidel, Hans-Peter},
  editor    = {J{\"u}ttler, Bert},
  language  = {eng},
  isbn      = {80-223-2057-9},
  localid   = {Local-ID: C125675300671F7B-4295A90B21C0D5C0C125719200453F37-Efremov2005SCCGz},
  publisher = {Comenius University},
  year      = {2005},
  date      = {2005},
  booktitle = {21st Spring Conference on Computer Graphics (SCCG 2005)},
  pages     = {123--131},
}
Endnote
%0 Conference Proceedings %A Efremov, Alexander %A Havran, Vlastimil %A Seidel, Hans-Peter %E J&#252;ttler, Bert %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Robust and Numerically Stable Bezier Clipping Method for Ray Tracing NURBS Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2792-A %F EDOC: 279028 %F OTHER: Local-ID: C125675300671F7B-4295A90B21C0D5C0C125719200453F37-Efremov2005SCCGz %D 2005 %B Untitled Event %Z date of event: 2005-05-12 - %C Budmerice, Slovakia %B 21st Spring Conference on Computer Graphics (SCCG 2005) %P 123 - 131 %I Comenius University %@ 80-223-2057-9
De Aguiar, E., Theobalt, C., Magnor, M., and Seidel, H.-P. 2005. Reconstructing Human Shape and Motion from Multi-View Video. 2nd European Conference on Visual Media Production (CVMP), The IEE.
Abstract
In model-based free-viewpoint video, a detailed representation of the time-varying geometry of a real-word scene is used to generate renditions of it from novel viewpoints. In this paper, we present a method for reconstructing such a dynamic geometry model of a human actor from multi-view video. In a two-step procedure, first the spatio-temporally consistent shape and poses of a generic human body model are estimated by means of a silhouette-based analysis-by-synthesis method. In a second step, subtle details in surface geometry that are specific to each particular time step are recovered by enforcing a color-consistency criterion. By this means, we generate a realistic representation of the time-varying geometry of a moving person that also reproduces these dynamic surface variations.
Export
BibTeX
@inproceedings{deAguiarCVMP05,
  title     = {Reconstructing Human Shape and Motion from Multi-View Video},
  author    = {de Aguiar, Edilson and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-86341-583-0},
  localid   = {Local-ID: C125675300671F7B-AFC01E1A3BC54B8DC12570CF002FB8B7-deAguiarCVMP05},
  publisher = {The IEE},
  year      = {2005},
  date      = {2005},
  abstract  = {In model-based free-viewpoint video, a detailed representation of the time-varying geometry of a real-word scene is used to generate renditions of it from novel viewpoints. In this paper, we present a method for reconstructing such a dynamic geometry model of a human actor from multi-view video. In a two-step procedure, first the spatio-temporally consistent shape and poses of a generic human body model are estimated by means of a silhouette-based analysis-by-synthesis method. In a second step, subtle details in surface geometry that are specific to each particular time step are recovered by enforcing a color-consistency criterion. By this means, we generate a realistic representation of the time-varying geometry of a moving person that also reproduces these dynamic surface variations.},
  booktitle = {2nd European Conference on Visual Media Production (CVMP)},
  pages     = {42--49},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Reconstructing Human Shape and Motion from Multi-View Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-277F-7 %F EDOC: 279030 %F OTHER: Local-ID: C125675300671F7B-AFC01E1A3BC54B8DC12570CF002FB8B7-deAguiarCVMP05 %I The IEE %D 2005 %B Untitled Event %Z date of event: 2005-11-30 - %C London, UK %X In model-based free-viewpoint video, a detailed representation of the time-varying geometry of a real-word scene is used to generate renditions of it from novel viewpoints. In this paper, we present a method for reconstructing such a dynamic geometry model of a human actor from multi-view video. In a two-step procedure, first the spatio-temporally consistent shape and poses of a generic human body model are estimated by means of a silhouette-based analysis-by-synthesis method. In a second step, subtle details in surface geometry that are specific to each particular time step are recovered by enforcing a color-consistency criterion. By this means, we generate a realistic representation of the time-varying geometry of a moving person that also reproduces these dynamic surface variations. %B 2nd European Conference on Visual Media Production (CVMP) %P 42 - 49 %I The IEE %@ 0-86341-583-0
Brabec, S., Annen, T., and Seidel, H.-P. 2005. Practical Shadow Mapping. In: R. Barzel, ed., Graphics Tools: The jgt Editors’ Choice. A.K. Peters, Wellesley, Massachusetts, USA.
Abstract
In this paper, we present several methods that can greatly improve image quality when using the shadow mapping algorithm. Shadow artifacts introduced by shadow mapping are mainly due to low resolution shadow maps and/or the limited numerical precision used when performing the shadow test. These problems especially arise when the light source’s viewing frustum, from which the shadow map is generated, is not adjusted to the actual camera view. We show how a tight-fitting frustum can be computed such that the shadow mapping algorithm concentrates on the visible parts of the scene and takes advantage of nearly the full available precision. Furthermore, we recommend uniformly spaced depth values in contrast to perspectively spaced depths in order to equally sample the scene seen from the light source.
Export
BibTeX
@incollection{JGTECV2005,
  title     = {Practical Shadow Mapping},
  author    = {Brabec, Stefan and Annen, Thomas and Seidel, Hans-Peter},
  editor    = {Barzel, Ronan},
  language  = {eng},
  isbn      = {1568812469},
  localid   = {Local-ID: C125675300671F7B-A639B7B1E56416A1C12570DD00510338-JGTECV2005},
  publisher = {A.K. Peters},
  address   = {Wellesley, Massachusetts, USA},
  year      = {2005},
  date      = {2005},
  abstract  = {In this paper, we present several methods that can greatly improve image quality when using the shadow mapping algorithm. Shadow artifacts introduced by shadow mapping are mainly due to low resolution shadow maps and/or the limited numerical precision used when performing the shadow test. These problems especially arise when the light source{\textquoteright}s viewing frustum, from which the shadow map is generated, is not adjusted to the actual camera view. We show how a tight-fitting frustum can be computed such that the shadow mapping algorithm concentrates on the visible parts of the scene and takes advantage of nearly the full available precision. Furthermore, we recommend uniformly spaced depth values in contrast to perspectively spaced depths in order to equally sample the scene seen from the light source.},
  booktitle = {Graphics Tools: The jgt Editors' Choice},
  pages     = {217--228},
}
Endnote
%0 Book Section %A Brabec, Stefan %A Annen, Thomas %A Seidel, Hans-Peter %E Barzel, Ronan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Practical Shadow Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-276E-D %F EDOC: 279031 %F OTHER: Local-ID: C125675300671F7B-A639B7B1E56416A1C12570DD00510338-JGTECV2005 %I A.K. Peters %C Wellesley, Massachusetts, USA %D 2005 %X In this paper, we present several methods that can greatly improve image quality when using the shadow mapping algorithm. Shadow artifacts introduced by shadow mapping are mainly due to low resolution shadow maps and/or the limited numerical precision used when performing the shadow test. These problems especially arise when the light source&#8217;s viewing frustum, from which the shadow map is generated, is not adjusted to the actual camera view. We show how a tight-fitting frustum can be computed such that the shadow mapping algorithm concentrates on the visible parts of the scene and takes advantage of nearly the full available precision. Furthermore, we recommend uniformly spaced depth values in contrast to perspectively spaced depths in order to equally sample the scene seen from the light source. %B Graphics Tools: The jgt Editors' Choice %E Barzel, Ronan %P 217 - 228 %I A.K. Peters %C Wellesley, Massachusetts, USA %@ 1568812469
Albrecht, I., Blanz, V., Haber, J., and Seidel, H.-P. 2005a. Creating Face Models from Vague Mental Images. SIGGRAPH ’05: ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/AlbrechtBHS05, TITLE = {Creating Face Models from Vague Mental Images}, AUTHOR = {Albrecht, Irene and Blanz, Volker and Haber, J{\"o}rg and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7827-7}, DOI = {10.1145/1187112.1187210}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches}, EDITOR = {Buhler, Juan}, PAGES = {82}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Albrecht, Irene %A Blanz, Volker %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Creating Face Models from Vague Mental Images : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3CE9-8 %R 10.1145/1187112.1187210 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques 2005 %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 82 %I ACM %@ 978-1-4503-7827-7
Albrecht, I., Schröder, M., Haber, J., and Seidel, H.-P. 2005b. Mixed feelings: Expression of Non-basic Emotions in a Muscle-based Talking Head. Virtual Reality8.
Export
BibTeX
@article{Albrecht-et-al_VR05, TITLE = {Mixed feelings: Expression of Non-basic Emotions in a Muscle-based Talking Head}, AUTHOR = {Albrecht, Irene and Schr{\"o}der, Marc and Haber, J{\"o}rg and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1434-9957}, DOI = {10.1007/s10055-005-0153-5}, LOCALID = {Local-ID: C125675300671F7B-1E15F277C67E1207C1256F54004BD0D0-Albrecht:MF}, PUBLISHER = {Springer}, ADDRESS = {London}, YEAR = {2005}, DATE = {2005}, JOURNAL = {Virtual Reality}, VOLUME = {8}, PAGES = {201--212}, }
Endnote
%0 Journal Article %A Albrecht, Irene %A Schr&#246;der, Marc %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed feelings: Expression of Non-basic Emotions in a Muscle-based Talking Head : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2713-A %F EDOC: 279034 %F OTHER: Local-ID: C125675300671F7B-1E15F277C67E1207C1256F54004BD0D0-Albrecht:MF %R 10.1007/s10055-005-0153-5 %D 2005 %* Review method: peer-reviewed %J Virtual Reality %V 8 %& 201 %P 201 - 212 %I Springer %C London %@ false %U https://rdcu.be/dHiZ7
Ahmed, N., de Aguiar, E., Theobalt, C., Magnor, M., and Seidel, H.-P. 2005. Automatic Generation of Personalized Human Avatars from Multi-view Video. VRST ’05, ACM Symposium on Virtual Reality Software & Technology, ACM.
Export
BibTeX
@inproceedings{NahmedVRST2005, TITLE = {Automatic Generation of Personalized Human Avatars from Multi-view Video}, AUTHOR = {Ahmed, Naveed and de Aguiar, Edilson and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {1-59593-098-1}, DOI = {10.1145/1101616.1101668}, LOCALID = {Local-ID: C1256BDE005F57A8-F81F02FAB6F8E958C12571570045F2C8-NahmedVRST2005}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {VRST '05, ACM Symposium on Virtual Reality Software \& Technology}, EDITOR = {Chrysanthou, Yiorgos and Darken, Rudolph}, PAGES = {257--260}, ADDRESS = {Monterey, USA}, }
Endnote
%0 Conference Proceedings %A Ahmed, Naveed %A de Aguiar, Edilson %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Generation of Personalized Human Avatars from Multi-view Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2873-8 %F EDOC: 520713 %R 10.1145/1101616.1101668 %F OTHER: Local-ID: C1256BDE005F57A8-F81F02FAB6F8E958C12571570045F2C8-NahmedVRST2005 %D 2005 %B ACM Symposium on Virtual Reality Software & Technology %Z date of event: 2005-11-07 - 2005-11-09 %C Monterey, USA %B VRST '05 %E Chrysanthou, Yiorgos; Darken, Rudolph %P 257 - 260 %I ACM %@ 1-59593-098-1
2004
Ziegler, G., Lensch, H.P.A., Ahmed, N., Magnor, M., and Seidel, H.-P. 2004a. Multi-video Compression in Texture Space. Proceedings of the 11th IEEE International Conference on Image Processing (ICIP 2004), IEEE.
Abstract
We present a model-based approach to encode multiple synchronized<br> video streams depicting a dynamic scene from different viewpoints.<br> With approximate 3D scene geometry available, we compensate for<br> motion as well as disparity by transforming all video images <br> to object textures prior to compression.<br> A two-level hierarchical coding strategy is employed to efficiently<br> exploit inter-texture coherence as well as to ensure quick random<br> access during decoding.<br> Experimental validation shows that attainable compression ratios<br> range up to 50:1 without subsampling.<br> The proposed coding scheme is intended for use in conjunction with<br> Free-Viewpoint Video and 3D-TV applications.
Export
BibTeX
@inproceedings{Ziegler-et-al_ICIP04, TITLE = {Multi-video Compression in Texture Space}, AUTHOR = {Ziegler, Gernot and Lensch, Hendrik P. A. and Ahmed, Naveed and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-8555-1}, DOI = {10.1109/ICIP.2004.1421602}, LOCALID = {Local-ID: C125675300671F7B-D17123167FD763FEC1256EC4004487AE-Ziegler2003}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {We present a model-based approach to encode multiple synchronized<br> video streams depicting a dynamic scene from different viewpoints.<br> With approximate 3D scene geometry available, we compensate for<br> motion as well as disparity by transforming all video images <br> to object textures prior to compression.<br> A two-level hierarchical coding strategy is employed to efficiently<br> exploit inter-texture coherence as well as to ensure quick random<br> access during decoding.<br> Experimental validation shows that attainable compression ratios<br> range up to 50:1 without subsampling.<br> The proposed coding scheme is intended for use in conjunction with<br> Free-Viewpoint Video and 3D-TV applications.}, BOOKTITLE = {Proceedings of the 11th IEEE International Conference on Image Processing (ICIP 2004)}, PAGES = {2467--2470}, ADDRESS = {Singapore}, }
Endnote
%0 Conference Proceedings %A Ziegler, Gernot %A Lensch, Hendrik P. A. %A Ahmed, Naveed %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-video Compression in Texture Space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BBB-5 %F EDOC: 241625 %F OTHER: Local-ID: C125675300671F7B-D17123167FD763FEC1256EC4004487AE-Ziegler2003 %R 10.1109/ICIP.2004.1421602 %D 2004 %B 11th IEEE International Conference on Image Processing %Z date of event: 2004-10-24 - 2004-10-27 %C Singapore %X We present a model-based approach to encode multiple synchronized<br> video streams depicting a dynamic scene from different viewpoints.<br> With approximate 3D scene geometry available, we compensate for<br> motion as well as disparity by transforming all video images <br> to object textures prior to compression.<br> A two-level hierarchical coding strategy is employed to efficiently<br> exploit inter-texture coherence as well as to ensure quick random<br> access during decoding.<br> Experimental validation shows that attainable compression ratios<br> range up to 50:1 without subsampling.<br> The proposed coding scheme is intended for use in conjunction with<br> Free-Viewpoint Video and 3D-TV applications. %B Proceedings of the 11th IEEE International Conference on Image Processing %P 2467 - 2470 %I IEEE %@ 0-7803-8555-1
Ziegler, G., Lensch, H., Magnor, M., and Seidel, H.-P. 2004b. Multi-Video Compression in Texture Space using 4D SPIHT. Proceedings of the 6th IEEE Workshop on Multimedia Signal Processing (MMSP 2004), IEEE.
Export
BibTeX
@inproceedings{Ziegler-et-al_MMSP04, TITLE = {Multi-Video Compression in Texture Space using {4D} {SPIHT}}, AUTHOR = {Ziegler, Gernot and Lensch, Hendrik and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-8579-9}, DOI = {10.1109/MMSP.2004.1436410}, LOCALID = {Local-ID: C1256BDE005F57A8-2F4525C4C391F91BC1256F47003BD2E9-Ziegler2004:MVC4D}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {Proceedings of the 6th IEEE Workshop on Multimedia Signal Processing (MMSP 2004)}, PAGES = {39--42}, ADDRESS = {Siena, Italy}, }
Endnote
%0 Conference Proceedings %A Ziegler, Gernot %A Lensch, Hendrik %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-Video Compression in Texture Space using 4D SPIHT : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BBE-0 %F EDOC: 241626 %F OTHER: Local-ID: C1256BDE005F57A8-2F4525C4C391F91BC1256F47003BD2E9-Ziegler2004:MVC4D %R 10.1109/MMSP.2004.1436410 %D 2004 %B 6th IEEE Workshop on Multimedia Signal Processing %Z date of event: 2004-09-29 - 2004-10-01 %C Siena, Italy %B Proceedings of the 6th IEEE Workshop on Multimedia Signal Processing %P 39 - 42 %I IEEE %@ 0-7803-8579-9
Zayer, R., Rössl, C., and Seidel, H.-P. 2004a. Variations of angle based flattening. In: N.A. Dodgson, M.S. Floater and M.A. Sabin, eds., Advances in Multiresolution for Geometric Modelling. Springer, Berlin, Germany.
Abstract
Angle Based Flattening is a robust parameterization technique allowing a free boundary. The numerical optimization associated with the approach yields a challenging problem. We discuss several approaches to effectively reduce the computational effort involved and propose appropriate numerical solvers. We propose a simple but effective transformation of the problem which reduces the computational cost and simplifies the implementation. We also show that fast convergence can be achieved by finding approximate solutions which yield a low angular distortion.
Export
BibTeX
@incollection{zrs:vabf:04, TITLE = {Variations of angle based flattening}, AUTHOR = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter}, EDITOR = {Dodgson, Neil A. and Floater, Michael S. and Sabin, Malcolm A.}, LANGUAGE = {eng}, ISBN = {3-540-21462-3}, LOCALID = {Local-ID: C125675300671F7B-0D759494D520EF50C1256E77004C48FD-zrs:vabf:04}, PUBLISHER = {Springer}, ADDRESS = {Berlin, Germany}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Angle Based Flattening is a robust parameterization technique allowing a free boundary. The numerical optimization associated with the approach yields a challenging problem. We discuss several approaches to effectively reduce the computational effort involved and propose appropriate numerical solvers. We propose a simple but effective transformation of the problem which reduces the computational cost and simplifies the implementation. We also show that fast convergence can be achieved by finding approximate solutions which yield a low angular distortion.}, BOOKTITLE = {Advances in Multiresolution for Geometric Modelling}, DEBUG = {editor: Dodgson, Neil A.; editor: Floater, Michael S.; editor: Sabin, Malcolm A.}, PAGES = {187--199}, SERIES = {Mathematics and Visualization}, }
Endnote
%0 Book Section %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %E Dodgson, Neil A. %E Floater, Michael S. %E Sabin, Malcolm A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Variations of angle based flattening : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29D6-4 %F EDOC: 231370 %F OTHER: Local-ID: C125675300671F7B-0D759494D520EF50C1256E77004C48FD-zrs:vabf:04 %I Springer %C Berlin, Germany %D 2004 %X Angle Based Flattening is a robust parameterization technique allowing a free boundary. The numerical optimization associated with the approach yields a challenging problem. We discuss several approaches to effectively reduce the computational effort involved and propose appropriate numerical solvers. We propose a simple but effective transformation of the problem which reduces the computational cost and simplifies the implementation. We also show that fast convergence can be achieved by finding approximate solutions which yield a low angular distortion. %B Advances in Multiresolution for Geometric Modelling %E Dodgson, Neil A.; Floater, Michael S.; Sabin, Malcolm A. %P 187 - 199 %I Springer %C Berlin, Germany %@ 3-540-21462-3 %S Mathematics and Visualization
Zayer, R., Rössl, C., and Seidel, H.-P. 2004b. Efficient Iterative Solvers for Angle Based Flattening. Vision, modeling, and visualization 2004 (VMV 2004), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{Zayer-et-al_VMV04, TITLE = {Efficient Iterative Solvers for Angle Based Flattening}, AUTHOR = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-89838-058-0}, LOCALID = {Local-ID: C125675300671F7B-97FFF39552CE825EC1256F7F00447388-zayer:eisabf:2004}, PUBLISHER = {Akademische Verlagsgesellschaft Aka}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {Vision, modeling, and visualization 2004 (VMV 2004)}, EDITOR = {Girod, Bernd and Magnor, Marcus and Seidel, Hans-Peter}, PAGES = {347--354}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Iterative Solvers for Angle Based Flattening : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29D8-F %F EDOC: 231371 %F OTHER: Local-ID: C125675300671F7B-97FFF39552CE825EC1256F7F00447388-zayer:eisabf:2004 %D 2004 %B 9th International Fall Workshop on Vision, modeling, and visualization %Z date of event: 2004-11-16 - 2004-11-18 %C Stanford, CA, USA %B Vision, modeling, and visualization 2004 %E Girod, Bernd; Magnor, Marcus; Seidel, Hans-Peter %P 347 - 354 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-058-0
Zayer, R., Rössl, C., and Seidel, H.-P. 2004c. r-Adaptive parameterization of surfaces. Max-Planck-Institut für Informatik, Saarbrücken.
Export
BibTeX
@techreport{Zayer-Roessl-Seidel_MPI-I-2004-4-004, TITLE = {r-Adaptive parameterization of surfaces}, AUTHOR = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, NUMBER = {MPI-I-2004-4-004}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2004}, DATE = {2004-06}, TYPE = {Max-Planck-Institut f&#252;r Informatik <Saarbr&#252;cken>: Research Report}, EDITOR = {{Max-Planck-Institut f{\"u}r Informatik {\textless}Saarbr{\"u}cken{\textgreater}}}, }
Endnote
%0 Report %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T r-Adaptive parameterization of surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-28E9-2 %F EDOC: 237863 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2004 %P 10 p. %B Max-Planck-Institut f&#252;r Informatik <Saarbr&#252;cken>: Research Report
Zakaria, N. and Seidel, H.-P. 2004. Interactive Stylized Silhouette for Point-sampled Geometry. Proceedings of the 2nd International Conference on Computer Graphics and Interactive Techniques in Australasia and Southeast Asia (GRAPHITE 2004), ACM.
Export
BibTeX
@inproceedings{DBLP:conf/graphite/ZakariaS04, TITLE = {Interactive Stylized Silhouette for Point-sampled Geometry}, AUTHOR = {Zakaria, Nordin and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-58113-883-2}, DOI = {10.1145/988834.988876}, PUBLISHER = {ACM}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {Proceedings of the 2nd International Conference on Computer Graphics and Interactive Techniques in Australasia and Southeast Asia (GRAPHITE 2004)}, EDITOR = {Lee, Yong Tsui and Spencer, Stephen N. and Chalmers, Alan and Soon, Seah Hock}, PAGES = {242--249}, ADDRESS = {Singapore}, }
Endnote
%0 Conference Proceedings %A Zakaria, Nordin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Stylized Silhouette for Point-sampled Geometry : %G eng %U http://hdl.handle.net/21.11116/0000-000F-2723-E %R 10.1145/988834.988876 %D 2004 %B 2nd International Conference on Computer Graphics and Interactive Techniques in Australasia and South East Asia %Z date of event: 2004-06-15 - 2004-06-18 %C Singapore %B Proceedings of the 2nd International Conference on Computer Graphics and Interactive Techniques in Australasia and Southeast Asia %E Lee, Yong Tsui; Spencer, Stephen N.; Chalmers, Alan; Soon, Seah Hock %P 242 - 249 %I ACM %@ 978-1-58113-883-2
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2004. A Fast and Simple Stretch-minimizing Mesh Parameterization. Shape Modeling International 2004 (SMI 2004), IEEE.
Abstract
We propose a fast and simple method for generating a low-stretch<br>mesh parameterization. Given a triangle mesh, we start from <br>the Floater shape preserving parameterization and then<br>improve the parameterization gradually. At each improvement step,<br>we optimize the parameterization generated at the previous step<br>by minimizing a weighted quadratic energy where the weights <br>are chosen in order to minimize the parameterization stretch.<br>This optimization procedure does not generate triangle <br>flips if the boundary of the parameter domain is a convex polygon. <br>Moreover already the first optimization step produces a high-quality mesh <br>parameterization. We compare our parameterization procedure with <br>several state-of-art mesh parameterization methods and demonstrate <br>its speed and high efficiency in parameterizing large and geometrically <br>complex models.
Export
BibTeX
@inproceedings{Yoshizawa-et-al_SMI04, TITLE = {A Fast and Simple Stretch-minimizing Mesh Parameterization}, AUTHOR = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2075-8}, DOI = {10.1109/SMI.2004.1314507}, LOCALID = {Local-ID: C125675300671F7B-EDF96A612555FA3BC1256F7F003B0FBE-Yoshizawa2004}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {We propose a fast and simple method for generating a low-stretch<br>mesh parameterization. Given a triangle mesh, we start from <br>the Floater shape preserving parameterization and then<br>improve the parameterization gradually. At each improvement step,<br>we optimize the parameterization generated at the previous step<br>by minimizing a weighted quadratic energy where the weights <br>are chosen in order to minimize the parameterization stretch.<br>This optimization procedure does not generate triangle <br>flips if the boundary of the parameter domain is a convex polygon. <br>Moreover already the first optimization step produces a high-quality mesh <br>parameterization. We compare our parameterization procedure with <br>several state-of-art mesh parameterization methods and demonstrate <br>its speed and high efficiency in parameterizing large and geometrically <br>complex models.}, BOOKTITLE = {Shape Modeling International 2004 (SMI 2004)}, EDITOR = {Giannini, Franca and Pasko, Alexander}, PAGES = {200--208}, ADDRESS = {Genova, Italy}, }
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Fast and Simple Stretch-minimizing Mesh Parameterization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29D4-8 %F EDOC: 231369 %F OTHER: Local-ID: C125675300671F7B-EDF96A612555FA3BC1256F7F003B0FBE-Yoshizawa2004 %R 10.1109/SMI.2004.1314507 %D 2004 %B 2004 International Conference on Shape Modeling and Applications %Z date of event: 2004-06-07 - 2004-06-09 %C Genova, Italy %X We propose a fast and simple method for generating a low-stretch<br>mesh parameterization. Given a triangle mesh, we start from <br>the Floater shape preserving parameterization and then<br>improve the parameterization gradually. At each improvement step,<br>we optimize the parameterization generated at the previous step<br>by minimizing a weighted quadratic energy where the weights <br>are chosen in order to minimize the parameterization stretch.<br>This optimization procedure does not generate triangle <br>flips if the boundary of the parameter domain is a convex polygon. <br>Moreover already the first optimization step produces a high-quality mesh <br>parameterization. We compare our parameterization procedure with <br>several state-of-art mesh parameterization methods and demonstrate <br>its speed and high efficiency in parameterizing large and geometrically <br>complex models. %B Shape Modeling International 2004 (SMI 2004) %E Giannini, Franca; Pasko, Alexander %P 200 - 208 %I IEEE %@ 0-7695-2075
Weinkauf, T., Hege, H.-C., Seidel, H.-P., and Theisel, H. 2004a. Boundary Switch Connectors for Topological Visualization of Complex 3D Vector Fields. 2004 Eurographics / IEEE VGTC Symposium on Visualization (VisSym 2004), The Eurographics Association.
Export
BibTeX
@inproceedings{Weinkauf-et-al_VisSym04, TITLE = {Boundary Switch Connectors for Topological Visualization of Complex {3D} Vector Fields}, AUTHOR = {Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter and Theisel, Holger}, LANGUAGE = {eng}, ISBN = {3-905673-07-X}, DOI = {10.2312/VisSym/VisSym04/183-192}, LOCALID = {Local-ID: C125675300671F7B-C8DB3D9D1CCF6DF8C1256E780060A567-Theisel2004_visysm}, PUBLISHER = {The Eurographics Association}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {2004 Eurographics / IEEE VGTC Symposium on Visualization (VisSym 2004)}, EDITOR = {Deussen, Oliver and Hansen, Charles and Keim, Daniel A. and Saupe, Dietmar}, PAGES = {183--192}, ADDRESS = {Konstanz, Germany}, }
Endnote
%0 Conference Proceedings %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %A Theisel, Holger %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Boundary Switch Connectors for Topological Visualization of Complex 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A3D-7 %F EDOC: 231915 %F OTHER: Local-ID: C125675300671F7B-C8DB3D9D1CCF6DF8C1256E780060A567-Theisel2004_visysm %R 10.2312/VisSym/VisSym04/183-192 %D 2004 %B 6th Joint EUROGRAPHICS - IEEE TCVG Symposium on Visualization (2004) %Z date of event: 2004-05-19 - 2004-05-21 %C Konstanz, Germany %B 2004 Eurographics / IEEE VGTC Symposium on Visualization %E Deussen, Oliver; Hansen, Charles; Keim, Daniel A.; Saupe, Dietmar %P 183 - 192 %I The Eurographics Association %@ 3-905673-07-X
Weinkauf, T., Theisel, H., Hege, H.-C., and Seidel, H.-P. 2004b. Topological Construction and Visualization of Higher Order 3D Vector Fields. Computer Graphics Forum (Proc. EUROGRAPHICS 2004), Blackwell.
Export
BibTeX
@inproceedings{Weinkauf-et-al_EUROGRAPHICS04, TITLE = {Topological Construction and Visualization of Higher Order {3D} Vector Fields}, AUTHOR = {Weinkauf, Tino and Theisel, Holger and Hege, Hans-Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2004.00778.x}, LOCALID = {Local-ID: C125675300671F7B-32AFDD05CAE0C9B2C1256E78005FE7AC-Theisel2004_eg}, PUBLISHER = {Blackwell}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {The European Association for Computer Graphics 25th Annual Conference (EUROGRAPHICS 2004)}, EDITOR = {Cani, Marie-Paule and Slater, Mel}, PAGES = {469--478}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {23}, ISSUE = {3}, ADDRESS = {Grenoble, France}, }
Endnote
%0 Conference Proceedings %A Weinkauf, Tino %A Theisel, Holger %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Topological Construction and Visualization of Higher Order 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29D2-C %F EDOC: 231367 %F OTHER: Local-ID: C125675300671F7B-32AFDD05CAE0C9B2C1256E78005FE7AC-Theisel2004_eg %R 10.1111/j.1467-8659.2004.00778.x %D 2004 %B The European Association for Computer Graphics 25th Annual Conference %Z date of event: 2004-08-30 - %C Grenoble, France %B The European Association for Computer Graphics 25th Annual Conference %E Cani, Marie-Paule; Slater, Mel %P 469 - 478 %I Blackwell %J Computer Graphics Forum %V 23 %N 3 %I Blackwell-Wiley %@ false
Weber, M., Milch, M., Myszkowski, K., Dmitriev, K.A., Rokita, P., and Seidel, H.-P. 2004. Spatio-temporal Photon Density Estimation Using Bilateral Filtering. Proceedings of the 2004 Computer Graphics International Conference (CGI 2004), IEEE.
Abstract
Photon tracing and density estimation are well established<br>techniques in global illumination computation and rendering of<br>high-quality animation sequences. Using traditional density estimation <br>techniques<br>it is difficult to remove stochastic noise inherent for photon-based<br>methods while avoiding overblurring lighting details.<br>In this paper we investigate the use of bilateral filtering<br>for lighting reconstruction based on the local density of photon hit points.<br>Bilateral filtering is applied in spatio-temporal domain and<br>provides control over the level-of-details in reconstructed lighting.<br>All changes of lighting below this level are treated as<br>stochastic noise and are suppressed. Bilateral<br>filtering proves to be efficient in preserving sharp features<br>in lighting which is in particular important for high-quality caustic<br>reconstruction. Also, flickering<br>between subsequent animation frames is substantially reduced<br>due to extending bilateral filtering into temporal domain.
Export
BibTeX
@inproceedings{Weber-et-al_CGI04, TITLE = {Spatio-temporal Photon Density Estimation Using Bilateral Filtering}, AUTHOR = {Weber, Markus and Milch, Marco and Myszkowski, Karol and Dmitriev, Kirill Alexandrovich and Rokita, Przemyslaw and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2171-1}, DOI = {10.1109/CGI.2004.1309200}, LOCALID = {Local-ID: C125675300671F7B-E7C820E451C4356AC1256E46006AB0DF-Weber2004}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Photon tracing and density estimation are well established<br>techniques in global illumination computation and rendering of<br>high-quality animation sequences. Using traditional density estimation <br>techniques<br>it is difficult to remove stochastic noise inherent for photon-based<br>methods while avoiding overblurring lighting details.<br>In this paper we investigate the use of bilateral filtering<br>for lighting reconstruction based on the local density of photon hit points.<br>Bilateral filtering is applied in spatio-temporal domain and<br>provides control over the level-of-details in reconstructed lighting.<br>All changes of lighting below this level are treated as<br>stochastic noise and are suppressed. Bilateral<br>filtering proves to be efficient in preserving sharp features<br>in lighting which is in particular important for high-quality caustic<br>reconstruction. Also, flickering<br>between subsequent animation frames is substantially reduced<br>due to extending bilateral filtering into temporal domain.}, BOOKTITLE = {Proceedings of the 2004 Computer Graphics International Conference (CGI 2004)}, EDITOR = {Cohen-Or, Daniel and Jain, Lakhmi and Magnenat-Thalmann, Nadia}, PAGES = {120--127}, ADDRESS = {Crete, Greece}, }
Endnote
%0 Conference Proceedings %A Weber, Markus %A Milch, Marco %A Myszkowski, Karol %A Dmitriev, Kirill Alexandrovich %A Rokita, Przemyslaw %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-temporal Photon Density Estimation Using Bilateral Filtering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B40-6 %F EDOC: 231920 %F OTHER: Local-ID: C125675300671F7B-E7C820E451C4356AC1256E46006AB0DF-Weber2004 %R 10.1109/CGI.2004.1309200 %D 2004 %B Computer Graphics International 2004 %Z date of event: 2004-06-16 - 2004-06-19 %C Crete, Greece %X Photon tracing and density estimation are well established<br>techniques in global illumination computation and rendering of<br>high-quality animation sequences. Using traditional density estimation <br>techniques<br>it is difficult to remove stochastic noise inherent for photon-based<br>methods while avoiding overblurring lighting details.<br>In this paper we investigate the use of bilateral filtering<br>for lighting reconstruction based on the local density of photon hit points.<br>Bilateral filtering is applied in spatio-temporal domain and<br>provides control over the level-of-details in reconstructed lighting.<br>All changes of lighting below this level are treated as<br>stochastic noise and are suppressed. Bilateral<br>filtering proves to be efficient in preserving sharp features<br>in lighting which is in particular important for high-quality caustic<br>reconstruction. Also, flickering<br>between subsequent animation frames is substantially reduced<br>due to extending bilateral filtering into temporal domain. 
%B Proceedings of the 2004 Computer Graphics International Conference %E Cohen-Or, Daniel; Jain, Lakhmi; Magnenat-Thalmann, Nadia %P 120 - 127 %I IEEE %@ 0-7695-2171-1
Theobalt, C., Albrecht, I., Haber, J., Magnor, M., and Seidel, H.-P. 2004a. Pitching a Baseball - Tracking High-speed Motion with Multi-exposure Images. ACM Transactions on Graphics23, 3.
Export
BibTeX
@article{Theobalt-et-al_TG04,
  TITLE     = {Pitching a Baseball -- Tracking High-speed Motion with Multi-exposure Images},
  AUTHOR    = {Theobalt, Christian and Albrecht, Irene and Haber, Joerg and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1015706.1015758},
  LOCALID   = {Local-ID: C1256BDE005F57A8-89A2F8F4139CC40EC1256F3A003A8B19-Theobalt2004:PBT},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {ACM Transactions on Graphics},
  EDITOR    = {Marks, Joe},
  VOLUME    = {23},
  NUMBER    = {3},
  PAGES     = {540--547},
}
Endnote
%0 Journal Article %A Theobalt, Christian %A Albrecht, Irene %A Haber, Joerg %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Pitching a Baseball - Tracking High-speed Motion with Multi-exposure Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BB5-2 %F EDOC: 241620 %F OTHER: Local-ID: C1256BDE005F57A8-89A2F8F4139CC40EC1256F3A003A8B19-Theobalt2004:PBT %R 10.1145/1015706.1015758 %D 2004 %J ACM Transactions on Graphics %V 23 %N 3 %& 540 %P 540 - 547 %I Association for Computing Machinery %C New York, NY %@ false
Theobalt, C., Magnor, M., Schüler, P., and Seidel, H.-P. 2004b. Combining 2D Feature Tracking and Volume Reconstruction for Online Video-based Human Motion Capture. International Journal of Image and Graphics 4, 4.
Export
BibTeX
@article{Theobalt-et-al_IJIG04,
  TITLE     = {Combining {2D} Feature Tracking and Volume Reconstruction for Online Video-based Human Motion Capture},
  AUTHOR    = {Theobalt, Christian and Magnor, Marcus and Sch{\"u}ler, Pascal and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0219-4678},
  DOI       = {10.1142/S0219467804001543},
  LOCALID   = {Local-ID: C1256BDE005F57A8-A541948E27FB239FC1256F350033511E-Theobalt04:C2DF},
  PUBLISHER = {World Scientific},
  ADDRESS   = {Singapore},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {International Journal of Image and Graphics},
  VOLUME    = {4},
  NUMBER    = {4},
  PAGES     = {563--583},
}
Endnote
%0 Journal Article %A Theobalt, Christian %A Magnor, Marcus %A Sch&#252;ler, Pascal %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combining 2D Feature Tracking and Volume Reconstruction for Online Video-based Human Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A56-D %F EDOC: 231804 %F OTHER: Local-ID: C1256BDE005F57A8-A541948E27FB239FC1256F350033511E-Theobalt04:C2DF %R 10.1142/S0219467804001543 %D 2004 %* Review method: peer-reviewed %J International Journal of Image and Graphics %V 4 %N 4 %& 563 %P 563 - 583 %I World Scientific %C Singapore %@ false
Theobalt, C., Carranza, J., Magnor, M., and Seidel, H.-P. 2004c. Combining 3D Flow Fields with Silhouette-based Human Motion Capture for Immersive Video. Graphical Models 66, 6.
Abstract
\begin{abstract}<br><br>In recent years, the convergence of Computer Vision and Computer Graphics has<br>put forth a new field of research that focuses on the reconstruction of <br>real-world scenes<br>from video streams.<br>To make immersive \mbox{3D} video reality, the whole pipeline spanning from <br>scene acquisition<br>over \mbox{3D} video reconstruction to real-time rendering needs to be <br>researched.<br><br>In this paper, we describe latest advancements of our system to record, <br>reconstruct and render<br>free-viewpoint videos of human actors.<br><br>We apply a silhouette-based non-intrusive motion capture <br>algorithm making use of a 3D human body model to estimate the actor's <br>parameters of motion<br>from multi-view video streams. A renderer plays back the acquired motion <br>sequence in real-time<br>from any arbitrary perspective. Photo-realistic physical appearance of the <br>moving actor is<br>obtained by generating time-varying multi-view textures from video.<br>This work shows how the motion capture sub-system can be enhanced<br>by incorporating texture information from the input video streams into the <br>tracking process. 3D motion fields<br>are reconstructed from optical flow that are used in combination with <br>silhouette matching to estimate pose parameters. We demonstrate that a <br>high visual quality can be achieved with the proposed approach and validate the <br>enhancements caused by the motion field step.<br><br>\end{abstract}
Export
BibTeX
@article{Theobalt-et-al_GM04,
  TITLE     = {Combining {3D} Flow Fields with Silhouette-based Human Motion Capture for Immersive Video},
  AUTHOR    = {Theobalt, Christian and Carranza, Joel and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1524-0703},
  DOI       = {10.1016/j.gmod.2004.06.009},
  LOCALID   = {Local-ID: C125675300671F7B-4C0C1106C6521B65C1256F5B004A40E1-TheobaltGM2004},
  PUBLISHER = {Academic Press},
  ADDRESS   = {San Diego, Calif.},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {In recent years, the convergence of Computer Vision and Computer Graphics has put forth a new field of research that focuses on the reconstruction of real-world scenes from video streams. To make immersive \mbox{3D} video reality, the whole pipeline spanning from scene acquisition over \mbox{3D} video reconstruction to real-time rendering needs to be researched. In this paper, we describe latest advancements of our system to record, reconstruct and render free-viewpoint videos of human actors. We apply a silhouette-based non-intrusive motion capture algorithm making use of a 3D human body model to estimate the actor's parameters of motion from multi-view video streams. A renderer plays back the acquired motion sequence in real-time from any arbitrary perspective. Photo-realistic physical appearance of the moving actor is obtained by generating time-varying multi-view textures from video. This work shows how the motion capture sub-system can be enhanced by incorporating texture information from the input video streams into the tracking process. 3D motion fields are reconstructed from optical flow that are used in combination with silhouette matching to estimate pose parameters. We demonstrate that a high visual quality can be achieved with the proposed approach and validate the enhancements caused by the motion field step.},
  JOURNAL   = {Graphical Models},
  VOLUME    = {66},
  NUMBER    = {6},
  PAGES     = {333--351},
}
Endnote
%0 Journal Article %A Theobalt, Christian %A Carranza, Joel %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combining 3D Flow Fields with Silhouette-based Human Motion Capture for Immersive Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29CE-7 %F EDOC: 231360 %F OTHER: Local-ID: C125675300671F7B-4C0C1106C6521B65C1256F5B004A40E1-TheobaltGM2004 %R 10.1016/j.gmod.2004.06.009 %D 2004 %* Review method: peer-reviewed %X \begin{abstract}<br><br>In recent years, the convergence of Computer Vision and Computer Graphics has<br>put forth a new field of research that focuses on the reconstruction of <br>real-world scenes<br>from video streams.<br>To make immersive \mbox{3D} video reality, the whole pipeline spanning from <br>scene acquisition<br>over \mbox{3D} video reconstruction to real-time rendering needs to be <br>researched.<br><br>In this paper, we describe latest advancements of our system to record, <br>reconstruct and render<br>free-viewpoint videos of human actors.<br><br>We apply a silhouette-based non-intrusive motion capture <br>algorithm making use of a 3D human body model to estimate the actor's <br>parameters of motion<br>from multi-view video streams. A renderer plays back the acquired motion <br>sequence in real-time<br>from any arbitrary perspective. Photo-realistic physical appearance of the <br>moving actor is<br>obtained by generating time-varying multi-view textures from video.<br>This work shows how the motion capture sub-system can be enhanced<br>by incorporating texture information from the input video streams into the <br>tracking process. 
3D motion fields<br>are reconstructed from optical flow that are used in combination with <br>silhouette matching to estimate pose parameters. We demonstrate that a <br>high visual quality can be achieved with the proposed approach and validate the <br>enhancements caused by the the motion field step.<br><br>\end{abstract} %J Graphical Models %V 66 %N 6 %& 333 %P 333 - 351 %I Academic Press %C San Diego, Calif. %@ false
Theobalt, C., Carranza, J., Magnor, M., and Seidel, H.-P. 2004d. 3D Video - Being Part of the Movie. ACM Computer Graphics 38.
Export
BibTeX
@article{Theobalt04:3DVB,
  TITLE    = {{3D} Video -- Being Part of the Movie},
  AUTHOR   = {Theobalt, Christian and Carranza, Joel and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN     = {0097-8930},
  LOCALID  = {Local-ID: C1256BDE005F57A8-45C462AC43778956C1256F3500367E09-Theobalt04:3DVB},
  YEAR     = {2004},
  DATE     = {2004},
  JOURNAL  = {ACM Computer Graphics},
  VOLUME   = {38},
  PAGES    = {18--20},
}
Endnote
%0 Journal Article %A Theobalt, Christian %A Carranza, Joel %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Video - Being Part of the Movie : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29F9-5 %F EDOC: 231811 %F OTHER: Local-ID: C1256BDE005F57A8-45C462AC43778956C1256F3500367E09-Theobalt04:3DVB %D 2004 %* Review method: peer-reviewed %J ACM Computer Graphics %V 38 %& 18 %P 18 - 20 %@ false
Theobalt, C., Ziegler, G., Magnor, M., and Seidel, H.-P. 2004e. Model-Based Free-Viewpoint Video Acquisition, Rendering and Encoding. Picture Coding Symposium 2004 (PCS-04), UC Davis.
Export
BibTeX
@inproceedings{Theobalt2004:MBF,
  TITLE     = {Model-Based Free-Viewpoint Video Acquisition, Rendering and Encoding},
  AUTHOR    = {Theobalt, Christian and Ziegler, Gernot and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C1256BDE005F57A8-14235A494177D82AC1256F4700392829-Theobalt2004:MBF},
  PUBLISHER = {UC Davis},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Picture Coding Symposium 2004 (PCS-04)},
  PAGES     = {SpecialSession5,1--6},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Ziegler, Gernot %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Model-Based Free-Viewpoint Video Acquisition, Rendering and Encoding : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2ADF-E %F EDOC: 231797 %F OTHER: Local-ID: C1256BDE005F57A8-14235A494177D82AC1256F4700392829-Theobalt2004:MBF %I UC Davis %D 2004 %B Untitled Event %Z date of event: 2004-11-01 - %C San Francisco, USA %B Picture Coding Symposium 2004 (PCS-04) %P SpecialSession5,1 - 6 %I UC Davis
Theobalt, C., de Aguiar, E., Magnor, M., Theisel, H., and Seidel, H.-P. 2004f. Marker-free Kinematic Skeleton Estimation from Sequences of Volume Data. Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST 2004), ACM.
Export
BibTeX
@inproceedings{Theobalt-et-al_VRST04,
  TITLE     = {Marker-free Kinematic Skeleton Estimation from Sequences of Volume Data},
  AUTHOR    = {Theobalt, Christian and de Aguiar, Edilson and Magnor, Marcus and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-58113-907-1},
  DOI       = {10.1145/1077534.1077546},
  LOCALID   = {Local-ID: C1256BDE005F57A8-0D8C31AAD119A87AC1256F47003A4910-Theobalt2004:MFK},
  PUBLISHER = {ACM},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST 2004)},
  EDITOR    = {Lau, Rynson and Baciu, George},
  PAGES     = {57--64},
  ADDRESS   = {Hong Kong, China},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A de Aguiar, Edilson %A Magnor, Marcus %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Marker-free Kinematic Skeleton Estimation from Sequences of Volume Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BB8-B %F EDOC: 241623 %F OTHER: Local-ID: C1256BDE005F57A8-0D8C31AAD119A87AC1256F47003A4910-Theobalt2004:MFK %R 10.1145/1077534.1077546 %D 2004 %B ACM Symposium on Virtual Reality Software and Technology 2003 %Z date of event: 2004-11-10 - 2004-11-12 %C Hong Kong, China %B Proceedings of the ACM Symposium on Virtual Reality Software and Technology %E Lau, Rynson; Baciu, George %P 57 - 64 %I ACM %@ 1-58113-907-1
Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2004a. Stream Line and Path Line Oriented Topology for 2D Time-dependent Vector Fields. IEEE Visualization 2004, IEEE.
Export
BibTeX
@inproceedings{Theisel-et-al_VIS04,
  TITLE     = {Stream Line and Path Line Oriented Topology for {2D} Time-dependent Vector Fields},
  AUTHOR    = {Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7803-8788-0},
  DOI       = {10.1109/VISUAL.2004.99},
  LOCALID   = {Local-ID: C125675300671F7B-7B8ABB22698705B7C1256EBF00325D1F-Theisel2004_vis},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {IEEE Visualization 2004},
  EDITOR    = {Rushmeier, Holly and Turk, Greg and Van Wijk, Jack},
  PAGES     = {321--328},
  ADDRESS   = {Austin, USA},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Stream Line and Path Line Oriented Topology for 2D Time-dependent Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B44-D %F EDOC: 231896 %F OTHER: Local-ID: C125675300671F7B-7B8ABB22698705B7C1256EBF00325D1F-Theisel2004_vis %R 10.1109/VISUAL.2004.99 %D 2004 %B IEEE 2004 Conference on Visualization %Z date of event: 2004-10-10 - 2004-10-15 %C Austin, USA %B IEEE Visualization 2004 %E Rushmeier, Holly; Turk, Greg; Van Wijk, Jack %P 321 - 328 %I IEEE %@ 0-7803-8788-0
Theisel, H., Rössl, C., and Seidel, H.-P. 2004b. Topology Preserving Thinning of Vector Fields on Triangular Meshes. In: Advances in Multiresolution for Geometric Modelling. Springer, Berlin, Germany.
Abstract
We consider the topology of piecewise linear vector fields whose domain is a <br>piecewise linear 2-manifold, i.e. a triangular mesh. Such vector fields can <br>describe simulated 2-dimensional flows, or they may reflect geometric <br>properties of the underlying mesh. We introduce a thinning technique which <br>preserves the complete topology of the vector field, i.e. the critical points <br>and separatrices. As the theoretical foundation, we have shown in an earlier <br>paper that for local modifications of a vector field, it is possible to decide <br>entirely by a local analysis whether or not the global topology is preserved. <br>This result is applied in a number of compression algorithms which are based on <br>a repeated local modification of the vector field, namely a repeated <br>edge-collapse of the underlying piecewise linear domain.
Export
BibTeX
@incollection{Theisel-et-al_AMGM04,
  TITLE     = {Topology Preserving Thinning of Vector Fields on Triangular Meshes},
  AUTHOR    = {Theisel, Holger and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1612-3786},
  ISBN      = {3-540-21462-3},
  DOI       = {10.1007/3-540-26808-1_20},
  LOCALID   = {Local-ID: C125675300671F7B-786D77D58525CD29C1256E77004B68B3-trs:tptfv:04},
  PUBLISHER = {Springer},
  ADDRESS   = {Berlin, Germany},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {We consider the topology of piecewise linear vector fields whose domain is a piecewise linear 2-manifold, i.e. a triangular mesh. Such vector fields can describe simulated 2-dimensional flows, or they may reflect geometric properties of the underlying mesh. We introduce a thinning technique which preserves the complete topology of the vector field, i.e. the critical points and separatrices. As the theoretical foundation, we have shown in an earlier paper that for local modifications of a vector field, it is possible to decide entirely by a local analysis whether or not the global topology is preserved. This result is applied in a number of compression algorithms which are based on a repeated local modification of the vector field, namely a repeated edge-collapse of the underlying piecewise linear domain.},
  BOOKTITLE = {Advances in Multiresolution for Geometric Modelling},
  EDITOR    = {Dodgson, Neil A. and Floater, Michael S. and Sabin, Malcom A.},
  PAGES     = {353--366},
  SERIES    = {Mathematics and Visualization},
}
Endnote
%0 Book Section %A Theisel, Holger %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Topology Preserving Thinning of Vector Fields on Triangular Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B61-D %F EDOC: 231898 %F OTHER: Local-ID: C125675300671F7B-786D77D58525CD29C1256E77004B68B3-trs:tptfv:04 %R 10.1007/3-540-26808-1_20 %D 2004 %X We consider the topology of piecewise linear vector fields whose domain is a <br>piecewise linear 2-manifold, i.e. a triangular mesh. Such vector fields can <br>describe simulated 2-dimensional flows, or they may reflect geometric <br>properties of the underlying mesh. We introduce a thinning technique which <br>preserves the complete topology of the vector field, i.e. the critical points <br>and separatrices. As the theoretical foundation, we have shown in an earlier <br>paper that for local modi&#216;cations of a vector field, it is possible to decide <br>entirely by a local analysis whether or not the global topology is preserved. <br>This result is applied in a number of compression algorithms which are based on <br>a repeated local modification of the vector field, namely a repeated <br>edge-collapse of the underlying piecewise linear domain. %B Advances in Multiresolution for Geometric Modelling %E Dodgson, Neil A.; Floater, Michael S.; Sabin, Malcom A. %P 353 - 366 %I Springer %C Berlin, Germany %@ 3-540-21462-3 %S Mathematics and Visualization %@ false %U https://rdcu.be/dEZxb
Theisel, H., Rössl, C., Zayer, R., and Seidel, H.-P. 2004c. Normal Based Estimation of the Curvature Tensor for Triangular Meshes. Proceedings of the 12th Pacific Conference on Computer Graphics and Applications (PG 2004), IEEE.
Abstract
We introduce a new technique for estimating the curvature tensor of a <br>triangular mesh. The input of the algorithm is only a single triangle equipped <br>with its (exact or estimated) vertex normals. This way we get a smooth function <br>of the curvature tensor inside each triangle of the mesh. We show that the <br>error of the new method is comparable with the error of a cubic fitting <br>approach if the incorporated normals are estimated. If the exact normals of the <br>underlying surface are available at the vertices, the error drops significantly. We demonstrate the applicability of the new estimation at a rather <br>complex data set.
Export
BibTeX
@inproceedings{Theisel-et-al_PG04,
  TITLE     = {Normal Based Estimation of the Curvature Tensor for Triangular Meshes},
  AUTHOR    = {Theisel, Holger and R{\"o}ssl, Christian and Zayer, Rhaleb and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2234-3},
  DOI       = {10.1109/PCCGA.2004.1348359},
  LOCALID   = {Local-ID: C125675300671F7B-32AFDE8594758F3EC1256EBF0032FD01-Theisel:PG:2004},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {We introduce a new technique for estimating the curvature tensor of a triangular mesh. The input of the algorithm is only a single triangle equipped with its (exact or estimated) vertex normals. This way we get a smooth function of the curvature tensor inside each triangle of the mesh. We show that the error of the new method is comparable with the error of a cubic fitting approach if the incorporated normals are estimated. If the exact normals of the underlying surface are available at the vertices, the error drops significantly. We demonstrate the applicability of the new estimation at a rather complex data set.},
  BOOKTITLE = {Proceedings of the 12th Pacific Conference on Computer Graphics and Applications (PG 2004)},
  EDITOR    = {Cohen-Or, Daniel and Ko, Hyeong-Seok and Terzopoulos, Demetri and Warren, Joe},
  PAGES     = {288--297},
  ADDRESS   = {Seoul, South Korea},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A R&#246;ssl, Christian %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Normal Based Estimation of the Curvature Tensor for Triangular Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AE6-B %F EDOC: 231930 %F OTHER: Local-ID: C125675300671F7B-32AFDE8594758F3EC1256EBF0032FD01-Theisel:PG:2004 %R 10.1109/PCCGA.2004.1348359 %D 2004 %B 12th Pacific Conference on Computer Graphics and Applications %Z date of event: 2004-10-06 - 2004-10-08 %C Seoul, South Korea %X We introduce a new technique for estimating the curvature tensor of a <br>triangular mesh. The input of the algorithm is only a single triangle equipped <br>with its (exact or estimated) vertex normals. This way we get a smooth function <br>of the curvature tensor inside each triangle of the mesh. We show that the <br>error of the new method is comparable with the error of a cubic fitting <br>approach if the incorporated normals are estimated. If the exact normals of the <br>underlying surface are available at the vertices, the error drops signifi- <br>cantly. We demonstrate the applicability of the new estimation at a rather <br>complex data set. %B Proceedings of the 12th Pacific Conference on Computer Graphics and Applications %E Cohen-Or, Daniel; Ko, Hyeong-Seok; Terzopoulos, Demetri; Warren, Joe %P 288 - 297 %I IEEE %@ 0-7695-2234-3
Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2004d. Grid-Independent Detection of Closed Stream Lines in 2D Vector Fields. Vision, modeling, and visualization 2004 (VMV 2004), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{Theisel-et-al_VMV04,
  TITLE     = {Grid-Independent Detection of Closed Stream Lines in {2D} Vector Fields},
  AUTHOR    = {Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-058-0},
  LOCALID   = {Local-ID: C125675300671F7B-D481049256887DB7C1256F6D0067B7C4-Theisel_VMV_2004},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Vision, modeling, and visualization 2004 (VMV 2004)},
  EDITOR    = {Girod, Bernd and Magnor, Marcus and Seidel, Hans-Peter},
  PAGES     = {421--428},
  ADDRESS   = {Stanford, USA},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Grid-Independent Detection of Closed Stream Lines in 2D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AB1-1 %F EDOC: 231904 %F OTHER: Local-ID: C125675300671F7B-D481049256887DB7C1256F6D0067B7C4-Theisel_VMV_2004 %D 2004 %B 9th International Fall Workshop on Vision, modeling, and visualization 2004 %Z date of event: 2004-11-16 - 2004-11-18 %C Stanford, USA %B Vision, modeling, and visualization 2004 %E Girod, Bernd; Magnor, Marcus; Seidel, Hans-Peter %P 421 - 428 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-058-0
Tawara, T., Myszkowski, K., and Seidel, H.-P. 2004a. Exploiting Temporal Coherence in Final Gathering for Dynamic Scenes. Proceedings of the 2004 Computer Graphics International Conference (CGI 2004), IEEE.
Abstract
Efficient global illumination computation in dynamically<br>changing environments is an important practical<br>problem. In high-quality animation rendering<br>costly "final gathering" technique is commonly<br>used. We extend this technique into temporal domain<br>by exploiting coherence between the subsequent frames.<br>For this purpose we store previously computed<br>incoming radiance samples and refresh them evenly in space and<br>time using some aging criteria. The approach is<br>based upon a two-pass photon mapping algorithm with irradiance<br>cache, but it can be applied also in other gathering methods. The<br>algorithm significantly reduces the cost of expensive indirect<br>lighting computation and suppresses temporal aliasing<br>with respect to the state of the art frame-by-frame<br>rendering techniques.
Export
BibTeX
@inproceedings{Tawara-et-al_CGI04,
  TITLE     = {Exploiting Temporal Coherence in Final Gathering for Dynamic Scenes},
  AUTHOR    = {Tawara, Takehiro and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2171-1},
  DOI       = {10.1109/CGI.2004.1309199},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Efficient global illumination computation in dynamically changing environments is an important practical problem. In high-quality animation rendering costly ``final gathering'' technique is commonly used. We extend this technique into temporal domain by exploiting coherence between the subsequent frames. For this purpose we store previously computed incoming radiance samples and refresh them evenly in space and time using some aging criteria. The approach is based upon a two-pass photon mapping algorithm with irradiance cache, but it can be applied also in other gathering methods. The algorithm significantly reduces the cost of expensive indirect lighting computation and suppresses temporal aliasing with respect to the state of the art frame-by-frame rendering techniques.},
  BOOKTITLE = {Proceedings of the 2004 Computer Graphics International Conference (CGI 2004)},
  EDITOR    = {Cohen-Or, Daniel and Jain, Lakhmi and Magnenat-Thalmann, Nadia},
  PAGES     = {110--119},
  ADDRESS   = {Crete, Greece},
}
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Temporal Coherence in Final Gathering for Dynamic Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A93-5 %F EDOC: 231897 %R 10.1109/CGI.2004.1309199 %D 2004 %B Computer Graphics International 2004 %Z date of event: 2004-06-16 - 2004-06-19 %C Crete, Greece %X Efficient global illumination computation in dynamically<br>changing environments is an important practical<br>problem. In high-quality animation rendering<br>costly "final gathering" technique is commonly<br>used. We extend this technique into temporal domain<br>by exploiting coherence between the subsequent frames.<br>For this purpose we store previously computed<br>incoming radiance samples and refresh them evenly in space and<br>time using some aging criteria. The approach is<br>based upon a two-pass photon mapping algorithm with irradiance<br>cache, but it can be applied also in other gathering methods. The<br>algorithm significantly reduces the cost of expensive indirect<br>lighting computation and suppresses temporal aliasing<br>with respect to the state of the art frame-by-frame<br>rendering techniques. %B Proceedings of the 2004 Computer Graphics International Conference %E Cohen-Or, Daniel; Jain, Lakhmi; Magnenat-Thalmann, Nadia %P 110 - 119 %I IEEE %@ 0-7695-2171-1
Tawara, T., Myszkowski, K., Dmitriev, K., Havran, V., Damez, C., and Seidel, H.-P. 2004b. Exploiting Temporal Coherence in Global Illumination. Proceedings of the 20th Spring Conference on Computer Graphics (SCCG 2004), ACM.
Abstract
Producing high quality animations featuring rich object appearance and<br>compelling lighting effects is very time consuming using traditional<br>frame-by-frame rendering systems. In this paper we present a<br>number of global illumination and rendering solutions that<br>exploit temporal coherence in lighting distribution for subsequent<br>frames to improve the computation performance and overall<br>animation quality. Our strategy relies on extending into temporal domain<br>well-known global illumination techniques<br>such as density estimation photon tracing,<br>photon mapping, and bi-directional path tracing, which were<br>originally designed to handle static scenes only.
Export
BibTeX
@inproceedings{Tawara-et-al_SCCG04,
  TITLE     = {Exploiting Temporal Coherence in Global Illumination},
  AUTHOR    = {Tawara, Takehiro and Myszkowski, Karol and Dmitriev, Kirill and Havran, Vlastimil and Damez, Cyrille and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-58113-914-4},
  DOI       = {10.1145/1037210.1037214},
  LOCALID   = {Local-ID: C125675300671F7B-6088B687D952F1E4C1256EC1002F0C62-Tawara2004b},
  PUBLISHER = {ACM},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Producing high quality animations featuring rich object appearance and compelling lighting effects is very time consuming using traditional frame-by-frame rendering systems. In this paper we present a number of global illumination and rendering solutions that exploit temporal coherence in lighting distribution for subsequent frames to improve the computation performance and overall animation quality. Our strategy relies on extending into temporal domain well-known global illumination techniques such as density estimation photon tracing, photon mapping, and bi-directional path tracing, which were originally designed to handle static scenes only.},
  BOOKTITLE = {Proceedings of the 20th Spring Conference on Computer Graphics (SCCG 2004)},
  EDITOR    = {Pasko, Alexander},
  PAGES     = {23--33},
  ADDRESS   = {Budmerice, Slovakia},
}
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Dmitriev, Kirill %A Havran, Vlastimil %A Damez, Cyrille %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Temporal Coherence in Global Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A95-1 %F EDOC: 231906 %F OTHER: Local-ID: C125675300671F7B-6088B687D952F1E4C1256EC1002F0C62-Tawara2004b %R 10.1145/1037210.1037214 %D 2004 %B 20th Spring Conference on Computer Graphics %Z date of event: 2004-04-22 - 2004-04-24 %C Budmerice, Slovakia %X Producing high quality animations featuring rich object appearance and<br>compelling lighting effects is very time consuming using traditional<br>frame-by-frame rendering systems. In this paper we present a<br>number of global illumination and rendering solutions that<br>exploit temporal coherence in lighting distribution for subsequent<br>frames to improve the computation performance and overall<br>animation quality. Our strategy relies on extending into temporal domain<br>well-known global illumination techniques<br>such as density estimation photon tracing,<br>photon mapping, and bi-directional path tracing, which were<br>originally designed to handle static scenes only. %B Proceedings of the 20th Spring Conference on Computer Graphics %E Pasko, Alexander %P 23 - 33 %I ACM %@ 1-58113-914-4
Tawara, T., Myszkowski, K., and Seidel, H.-P. 2004c. Efficient Rendering of Strong Secondary Lighting in Photon Mapping Algorithm. Theory and Practice of Computer Graphics 2004 (TPCG 2004), IEEE.
Abstract
In this paper we propose an efficient algorithm for handling strong secondary light sources within the photon mapping framework. We introduce an additional photon map as an implicit representation of such light sources. At the rendering stage this map is used for the explicit sampling of strong indirect lighting in a similar way as it is usually performed for primary light sources. Our technique works fully automatically, improves the computation performance, and leads to a better image quality than traditional rendering approaches.
Export
BibTeX
@inproceedings{Tawara-et-al_TPCG04,
  TITLE     = {Efficient Rendering of Strong Secondary Lighting in Photon Mapping Algorithm},
  AUTHOR    = {Tawara, Takehiro and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2137-1},
  DOI       = {10.1109/TPCG.2004.1314468},
  LOCALID   = {Local-ID: C125675300671F7B-9FD06C3F844A7B2EC1256E5C003A7515-Tawara2004c},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {In this paper we propose an efficient algorithm for handling strong secondary light sources within the photon mapping framework. We introduce an additional photon map as an implicit representation of such light sources. At the rendering stage this map is used for the explicit sampling of strong indirect lighting in a similar way as it is usually performed for primary light sources. Our technique works fully automatically, improves the computation performance, and leads to a better image quality than traditional rendering approaches.},
  BOOKTITLE = {Theory and Practice of Computer Graphics 2004 (TPCG 2004)},
  EDITOR    = {Lever, Paul G.},
  PAGES     = {174--178},
  ADDRESS   = {Bournemouth, UK},
}
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Rendering of Strong Secondary Lighting in Photon Mapping Algorithm : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A80-F %F EDOC: 231931 %F OTHER: Local-ID: C125675300671F7B-9FD06C3F844A7B2EC1256E5C003A7515-Tawara2004c %R 10.1109/TPCG.2004.1314468 %D 2004 %B Theory and Practice of Computer Graphics 2004 %Z date of event: 2004-06-08 - 2004-06-10 %C Bournemouth, UK %X In this paper we propose an efficient algorithm<br>for handling strong secondary light sources<br>within the photon mapping framework. We introduce<br>an additional photon map as an implicit representation<br>of such light sources. At the<br>rendering stage this map is used for the explicit<br>sampling of strong indirect lighting in a similar<br>way as it is usually performed for primary light<br>sources. Our technique works fully automatically,<br>improves the computation performance, and leads to<br>a better image quality than traditional rendering<br>approaches. %B Theory and Practice of Computer Graphics 2004 %E Lever, Paul G. %P 174 - 178 %I IEEE %@ 0-7695-2137-1
Sunkel, M., Kautz, J., and Seidel, H.-P. 2004. Rendering and Simulation of Liquid Foams. Vision, Modeling, and Visualization 2004 (VMV 2004), Aka GmbH.
Export
BibTeX
@inproceedings{DBLP:conf/vmv/SunkelKS04,
  TITLE     = {Rendering and Simulation of Liquid Foams},
  AUTHOR    = {Sunkel, Martin and Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  PUBLISHER = {Aka GmbH},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Vision, Modeling, and Visualization 2004 (VMV 2004)},
  EDITOR    = {Girod, Bernd and Magnor, Marcus A. and Seidel, Hans-Peter},
  PAGES     = {263--269},
  ADDRESS   = {Stanford, CA, USA},
}
Endnote
%0 Conference Proceedings %A Sunkel, Martin %A Kautz, Jan %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Rendering and Simulation of Liquid Foams : %G eng %U http://hdl.handle.net/21.11116/0000-000F-13B4-0 %D 2004 %B 9th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2004-11-16 - 2004-11-18 %C Stanford, CA, USA %B Vision, Modeling, and Visualization 2004 %E Girod, Bernd; Magnor, Marcus A.; Seidel, Hans-Peter %P 263 - 269 %I Aka GmbH
Sorkine, O., Lipman, Y., Cohen-Or, D., Alexa, M., Rössl, C., and Seidel, H.-P. 2004. Laplacian Surface Editing. SGP 2004: Symposium on Geometry Processing, The Eurographics Association.
Abstract
Surface editing operations commonly require geometric details of the surface to be preserved as much as possible. We argue that geometric detail is an intrinsic property of a surface and that, consequently, surface editing is best performed by operating over an intrinsic surface representation. We provide such a representation of a surface, based on the Laplacian of the mesh, by encoding each vertex relative to its neighborhood. The Laplacian of the mesh is enhanced to be invariant to locally linearized rigid transformations and scaling. Based on this Laplacian representation, we develop useful editing operations: interactive free-form deformation in a region of interest based on the transformation of a handle, transfer and mixing of geometric details between two surfaces, and transplanting of a partial surface mesh onto another surface. The main computation involved in all operations is the solution of a sparse linear system, which can be done at interactive rates. We demonstrate the effectiveness of our approach in several examples, showing that the editing operations change the shape while respecting the structural geometric detail.
Export
BibTeX
@inproceedings{Sorkine-et-al_SGP04,
  TITLE     = {Laplacian Surface Editing},
  AUTHOR    = {Sorkine, Olga and Lipman, Yaron and Cohen-Or, Daniel and Alexa, Marc and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-13-4},
  ISSN      = {1727-8384},
  DOI       = {10.2312/SGP/SGP04/179-188},
  LOCALID   = {Local-ID: C125675300671F7B-33613951BE9C91C8C1256E9B002FBA94-SLCARS:2004},
  PUBLISHER = {The Eurographics Association},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Surface editing operations commonly require geometric details of the surface to be preserved as much as possible. We argue that geometric detail is an intrinsic property of a surface and that, consequently, surface editing is best performed by operating over an intrinsic surface representation. We provide such a representation of a surface, based on the Laplacian of the mesh, by encoding each vertex relative to its neighborhood. The Laplacian of the mesh is enhanced to be invariant to locally linearized rigid transformations and scaling. Based on this Laplacian representation, we develop useful editing operations: interactive free-form deformation in a region of interest based on the transformation of a handle, transfer and mixing of geometric details between two surfaces, and transplanting of a partial surface mesh onto another surface. The main computation involved in all operations is the solution of a sparse linear system, which can be done at interactive rates. We demonstrate the effectiveness of our approach in several examples, showing that the editing operations change the shape while respecting the structural geometric detail.},
  BOOKTITLE = {SGP 2004: Symposium on Geometry Processing},
  EDITOR    = {Scopigno, Roberto and Zorin, Denis and Fellner, Dieter and Spencer, Stephen},
  PAGES     = {179--188, 274},
  ADDRESS   = {Nice, France},
}
Endnote
%0 Conference Proceedings %A Sorkine, Olga %A Lipman, Yaron %A Cohen-Or, Daniel %A Alexa, Marc %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Laplacian Surface Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2ACE-3 %F EDOC: 231932 %F OTHER: Local-ID: C125675300671F7B-33613951BE9C91C8C1256E9B002FBA94-SLCARS:2004 %R 10.2312/SGP/SGP04/179-188 %D 2004 %B 2004 Symposium on Geometry Processing %Z date of event: 2004-07-08 - 2004-07-10 %C Nice, France %X Surface editing operations commonly require geometric details of the surface to <br>be preserved as much as possible. We argue that geometric detail is an <br>intrinsic property of a surface and that, consequently, surface editing is best <br>performed by operating over an intrinsic surface representation. We provide <br>such a representation of a surface, based on the Laplacian of the mesh, by <br>encoding each vertex relative to its neighborhood. The Laplacian of the mesh is <br>enhanced to be invariant to locally linearized rigid transformations and <br>scaling. Based on this Laplacian representation, we develop useful editing <br>operations: interactive free-form deformation in a region of interest based on <br>the transformation of a handle, transfer and mixing of geometric details <br>between two surfaces, and transplanting of a partial surface mesh onto another <br>surface. The main computation involved in all operations is the solution of a <br>sparse linear system, which can be done at interactive rates. We demonstrate <br>the effectiveness of our approach in several examples, showing that the editing <br>operations change the shape while respecting the structural geometric detail. 
%B SGP 2004: Symposium on Geometry Processing %E Scopigno, Roberto; Zorin, Denis; Fellner, Dieter; Spencer, Stephen %P 179 - 188, 274 %I The Eurographics Association %@ 3-905673-13-4/1727-8384
Schoner, J.L., Lang, J., and Seidel, H.-P. 2004. Measurement-based Interactive Simulation of Viscoelastic Solids. Computer Graphics Forum (Proc. EUROGRAPHICS 2004), Blackwell.
Export
BibTeX
@inproceedings{Schoner-et-al_EUROGRAPHICS04,
  TITLE     = {Measurement-based Interactive Simulation of Viscoelastic Solids},
  AUTHOR    = {Schoner, Jeffrey L. and Lang, Jochen and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2004.00786.x},
  LOCALID   = {Local-ID: C125675300671F7B-3F30E69254FA88FCC1256F6D00523DC7-Schoner2004},
  PUBLISHER = {Blackwell},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {The European Association for Computer Graphics 25th Annual Conference (EUROGRAPHICS 2004)},
  EDITOR    = {Cani, Marie-Paule and Slater, Mel},
  PAGES     = {547--556},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {23},
  ISSUE     = {3},
  ADDRESS   = {Grenoble, France},
}
Endnote
%0 Conference Proceedings %A Schoner, Jeffrey L. %A Lang, Jochen %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Measurement-based Interactive Simulation of Viscoelastic Solids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AD8-B %F EDOC: 231933 %F OTHER: Local-ID: C125675300671F7B-3F30E69254FA88FCC1256F6D00523DC7-Schoner2004 %R 10.1111/j.1467-8659.2004.00786.x %D 2004 %B The European Association for Computer Graphics 25th Annual Conference %Z date of event: 2004-08-30 - %C Grenoble, France %B The European Association for Computer Graphics 25th Annual Conference %E Cani, Marie-Paule; Slater, Mel %P 547 - 556 %I Blackwell %J Computer Graphics Forum %V 23 %N 3 %I Blackwell-Wiley %@ false
Scholz, V. and Magnor, M. 2004. Cloth Motion from Optical Flow. Vision, modeling, and visualization 2004 (VMV-04), Akademische Verlagsgesellschaft Aka.
Abstract
This paper presents an algorithm for capturing the motion of deformable surfaces, in particular textured cloth. In a calibrated multi-camera setup, the optical flow between consecutive video frames is determined and 3D scene flow is computed. We use a deformable surface model with constraints for vertex distances and curvature to increase the robustness of the optical flow measurements. Tracking errors in long video sequences are corrected by a silhouette matching procedure. We present results for synthetic cloth simulations and discuss how they can be extended to real-world footage.
Export
BibTeX
@inproceedings{Scholz2004,
  TITLE     = {Cloth Motion from Optical Flow},
  AUTHOR    = {Scholz, Volker and Magnor, Marcus},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-058-0},
  LOCALID   = {Local-ID: C1256BDE005F57A8-DAB1C4108465E349C1256F5E003D5E98-Scholz2004},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {This paper presents an algorithm for capturing the motion of deformable surfaces, in particular textured cloth. In a calibrated multi-camera setup, the optical flow between consecutive video frames is determined and 3D scene flow is computed. We use a deformable surface model with constraints for vertex distances and curvature to increase the robustness of the optical flow measurements. Tracking errors in long video sequences are corrected by a silhouette matching procedure. We present results for synthetic cloth simulations and discuss how they can be extended to real-world footage.},
  BOOKTITLE = {Vision, modeling, and visualization 2004 (VMV-04)},
  EDITOR    = {Girod, Bernd and Magnor, Marcus A. and Seidel, Hans-Peter},
  PAGES     = {117--124},
  ADDRESS   = {Stanford, CA, USA},
}
Endnote
%0 Conference Proceedings %A Scholz, Volker %A Magnor, Marcus %E Girod, B. %E Magnor, M. %E Seidel, H.-P. %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Cloth Motion from Optical Flow : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A4E-1 %F EDOC: 231802 %F OTHER: Local-ID: C1256BDE005F57A8-DAB1C4108465E349C1256F5E003D5E98-Scholz2004 %D 2004 %B 9th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2004-11-16 - %C Stanford, USA %X This paper presents an algorithm for capturing the motion of deformable surfaces, in particular textured cloth. In a calibrated multi-camera setup, the optical flow between consecutive video frames is determined and 3D scene flow is computed. We use a deformable surface model with constraints for vertex distances and curvature to increase the robustness of the optical flow measurements. Tracking errors in long video sequences are corrected by a silhouette matching procedure. We present results for synthetic cloth simulations and discuss how they can be extended to real-world footage. %B Vision, modeling, and visualization 2004 (VMV-04) %P 117 - 124 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-058-0
Rössl, C., Zeilfelder, F., Nürnberger, G., and Seidel, H.-P. 2004a. Spline Approximation of General Volumetric Data. Proceedings of the 9th ACM Symposium on Solid Modeling and Applications (SM 2004), Eurographics.
Abstract
We present an efficient algorithm for approximating huge general volumetric data sets, i.e. the data is given over arbitrarily shaped volumes and consists of up to millions of samples. The method is based on cubic trivariate splines, i.e. piecewise polynomials of total degree three defined w.r.t. uniform type-6 tetrahedral partitions of the volumetric domain. Similar as in the recent bivariate approximation approaches, the splines in three variables are automatically determined from the discrete data as a result of a two-step method, where local discrete least squares polynomial approximations of varying degrees are extended by using natural conditions, i.e. the continuity and smoothness properties which determine the underlying spline space. The main advantages of this approach with linear algorithmic complexity are as follows: no tetrahedral partition of the volume data is needed, only small linear systems have to be solved, the local variation and distribution of the data is automatically adapted, Bernstein-Bézier techniques well-known in Computer Aided Geometric Design (CAGD) can be fully exploited, noisy data are automatically smoothed. Our numerical examples with huge data sets for synthetic data as well as some real-world data confirm the efficiency of the methods, show the high quality of the spline approximation, and illustrate that the rendered iso-surfaces inherit a visual smooth appearance from the volume approximating splines.
Export
BibTeX
@inproceedings{Rossl-et-al_SM04,
  TITLE     = {Spline Approximation of General Volumetric Data},
  AUTHOR    = {R{\"o}ssl, Christian and Zeilfelder, Frank and N{\"u}rnberger, G{\"u}nther and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-55-X},
  DOI       = {10.2312/sm.20041378},
  LOCALID   = {Local-ID: C125675300671F7B-F422C083451D4DC4C1256E76002C2509-rzns:sagvd:04},
  PUBLISHER = {Eurographics},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {We present an efficient algorithm for approximating huge general volumetric data sets, i.e.~the data is given over arbitrarily shaped volumes and consists of up to millions of samples. The method is based on cubic trivariate splines, i.e.~piecewise polynomials of total degree three defined w.r.t. uniform type-6 tetrahedral partitions of the volumetric domain. Similar as in the recent bivariate approximation approaches, the splines in three variables are automatically determined from the discrete data as a result of a two-step method, where local discrete least squares polynomial approximations of varying degrees are extended by using natural conditions, i.e.~the continuity and smoothness properties which determine the underlying spline space. The main advantages of this approach with linear algorithmic complexity are as follows: no tetrahedral partition of the volume data is needed, only small linear systems have to be solved, the local variation and distribution of the data is automatically adapted, Bernstein-B{\'e}zier techniques well-known in Computer Aided Geometric Design (CAGD) can be fully exploited, noisy data are automatically smoothed. Our numerical examples with huge data sets for synthetic data as well as some real-world data confirm the efficiency of the methods, show the high quality of the spline approximation, and illustrate that the rendered iso-surfaces inherit a visual smooth appearance from the volume approximating splines.},
  BOOKTITLE = {Proceedings of the 9th ACM Symposium on Solid Modeling and Applications (SM 2004)},
  EDITOR    = {Elber, Gershon and Patrikalakis, Nick and Brunet, Pere},
  PAGES     = {71--82},
  ADDRESS   = {Genova, Italy},
}
Endnote
%0 Conference Proceedings %A R&#246;ssl, Christian %A Zeilfelder, Frank %A N&#252;rnberger, G&#252;nther %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Spline Approximation of General Volumetric Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29CC-B %F EDOC: 231348 %F OTHER: Local-ID: C125675300671F7B-F422C083451D4DC4C1256E76002C2509-rzns:sagvd:04 %R 10.2312/sm.20041378 %D 2004 %B 9th ACM Symposium on Solid Modeling and Applications %Z date of event: 2004-06-09 - 2004-06-11 %C Genova, Italy %X We present an efficient algorithm for approximating huge general<br> volumetric data sets, i.e.~the data is given over arbitrarily shaped<br> volumes and consists of up to millions of samples. The method is based<br> on cubic trivariate splines, i.e.~piecewise polynomials of total<br> degree three defined w.r.t. uniform type-6 tetrahedral partitions of<br> the volumetric domain. Similar as in the recent bivariate<br> approximation approaches, the splines in three variables<br> are automatically determined from the discrete data as a result of a<br> two-step method, where local discrete least<br> squares polynomial approximations of varying degrees are extended by<br> using natural conditions, i.e.the continuity and smoothness properties<br> which determine the underlying spline space. The main advantages of<br> this approach with linear algorithmic complexity are as follows: no<br> tetrahedral partition of the volume data is needed, only small<br> linear systems have to be solved, the local variation and<br> distribution of the data is automatically adapted,<br> Bernstein-B{\'e}zier techniques well-known in Computer Aided<br> Geometric Design (CAGD) can be fully exploited, noisy data are<br> automatically smoothed. 
Our numerical examples with huge data sets<br> for synthetic data as well as some real-world data confirm the<br> efficiency of the methods, show the high quality of the spline<br> approximation, and illustrate that the rendered iso-surfaces inherit<br> a visual smooth appearance from the volume approximating splines. %B Proceedings of the 9th ACM Symposium on Solid Modeling and Applications (SM 2004) %E Elber, Gershon; Patrikalakis, Nick; Brunet, Pere %P 71 - 82 %I Eurographics %@ 3-905673-55-X
Rössl, C., Zeilfelder, F., Nürnberger, G., and Seidel, H.-P. 2004b. Reconstruction of Volume Data with Quadratic Super Splines. IEEE Transactions on Visualization and Computer Graphics10, 4.
Abstract
We propose a new approach to reconstruct nondiscrete models from gridded volume samples. As a model, we use quadratic trivariate super splines on a uniform tetrahedral partition. We discuss the smoothness and approximation properties of our model and compare to alternative piecewise polynomial constructions. We observe as a non-standard phenomenon that the derivatives of our splines yield optimal approximation order for smooth data, while the theoretical error of the values is nearly optimal due to the averaging rules. Our approach enables efficient reconstruction and visualization of the data. As the piecewise polynomials are of the lowest possible total degree two, we can efficiently determine exact ray intersections with an iso-surface for ray-casting. Moreover, the optimal approximation properties of the derivatives allow to simply sample the necessary gradients directly from the polynomial pieces of the splines. Our results confirm the efficiency of the quasi-interpolating method and demonstrate high visual quality for rendered isosurfaces.
Export
BibTeX
@article{Rossl-et-al_TVCG04,
  TITLE     = {Reconstruction of Volume Data with Quadratic Super Splines},
  AUTHOR    = {R{\"o}ssl, Christian and Zeilfelder, Frank and N{\"u}rnberger, G{\"u}nther and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2004.16},
  LOCALID   = {Local-ID: C125675300671F7B-136B570DDA9525FAC1256E5300465D7C-rzns:qss:2004},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {New York, NY},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {We propose a new approach to reconstruct nondiscrete models from gridded volume samples. As a model, we use quadratic trivariate super splines on a uniform tetrahedral partition. We discuss the smoothness and approximation properties of our model and compare to alternative piecewise polynomial constructions. We observe as a non-standard phenomenon that the derivatives of our splines yield optimal approximation order for smooth data, while the theoretical error of the values is nearly optimal due to the averaging rules. Our approach enables efficient reconstruction and visualization of the data. As the piecewise polynomials are of the lowest possible total degree two, we can efficiently determine exact ray intersections with an iso-surface for ray-casting. Moreover, the optimal approximation properties of the derivatives allow to simply sample the necessary gradients directly from the polynomial pieces of the splines. Our results confirm the efficiency of the quasi-interpolating method and demonstrate high visual quality for rendered isosurfaces.},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME    = {10},
  NUMBER    = {4},
  PAGES     = {397--409},
}
Endnote
%0 Journal Article %A R&#246;ssl, Christian %A Zeilfelder, Frank %A N&#252;rnberger, G&#252;nther %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Reconstruction of Volume Data with Quadratic Super Splines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B18-2 %F EDOC: 231861 %F OTHER: Local-ID: C125675300671F7B-136B570DDA9525FAC1256E5300465D7C-rzns:qss:2004 %R 10.1109/TVCG.2004.16 %D 2004 %* Review method: peer-reviewed %X We propose a new approach to reconstruct nondiscrete models from gridded volume <br>samples. As a model, we use quadratic trivariate super splines on a uniform <br>tetrahedral partition. We discuss the smoothness and approximation properties <br>of our model and compare to alternative piecewise polynomial constructions. We <br>observe as a non-standard phenomenon that the derivatives of our splines yield <br>optimal approximation order for smooth data, while the theoretical error of the <br>values is nearly optimal due to the averaging rules. Our approach enables <br>efficient reconstruction and visualization of the data. As the piecewise <br>polynomials are of the lowest possible total degree two, we can efficiently <br>determine exact ray intersections with an iso-surface for ray-casting. <br>Moreover, the optimal approximation properties of the derivatives allow to <br>simply sample the necessary gradients directly from the polynomial pieces of <br>the splines. Our results confirm the efficiency of the quasi-interpolating <br>method and demonstrate high visual quality for rendered isosurfaces. %J IEEE Transactions on Visualization and Computer Graphics %V 10 %N 4 %& 397 %P 397 - 409 %I IEEE Computer Society %C New York, NY %@ false
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2004a. 3D Scattered Data Approximation with Adaptive Compactly Supported Radial Basis Functions. Shape Modeling International 2004 (SMI 2004), IEEE.
Abstract
In this paper, we develop an adaptive RBF fitting procedure for a high quality approximation of a set of points scattered over a piecewise smooth surface. We use compactly supported RBFs whose centers are randomly chosen from the points. The randomness is controlled by the point density and surface geometry. For each RBF, its support size is chosen adaptively according to surface geometry at a vicinity of the RBF center. All these lead to a noise-robust high quality approximation of the set. We also adapt our basic technique for shape reconstruction from registered range scans by taking into account measurement confidences. Finally, an interesting link between our RBF fitting procedure and partition of unity approximations is established and discussed.
Export
BibTeX
@inproceedings{Ohtake-et-al_SMI04, TITLE = {{3D} Scattered Data Approximation with Adaptive Compactly Supported Radial Basis Functions}, AUTHOR = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2075-8}, DOI = {10.1109/SMI.2004.1314491}, LOCALID = {Local-ID: C125675300671F7B-01FB2D4B5B4C2508C1256FAF0037848B-smi04obs}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {In this paper, we develop an adaptive RBF fitting procedure <br>for a high quality approximation of a set of points scattered <br>over a piecewise smooth surface. We use compactly supported <br>RBFs whose centers are randomly chosen from the points. <br>The randomness is controlled by the point density and surface <br>geometry. For each RBF, its support size is chosen adaptively <br>according to surface geometry at a vicinity of the RBF center.<br>All these lead to a noise-robust high quality approximation of <br>the set. We also adapt our basic technique for shape<br>reconstruction from registered range scans by taking into <br>account measurement confidences. Finally, an interesting link<br>between our RBF fitting procedure and partition of unity<br>approximations is established and discussed.}, BOOKTITLE = {Shape Modeling International 2004 (SMI 2004)}, EDITOR = {Giannini, Franca and Pasko, Alexander}, PAGES = {31--39}, ADDRESS = {Genova, Italy}, }
Endnote
%0 Conference Proceedings %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Scattered Data Approximation with Adaptive Compactly Supported Radial Basis Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29F1-6 %F EDOC: 231941 %F OTHER: Local-ID: C125675300671F7B-01FB2D4B5B4C2508C1256FAF0037848B-smi04obs %R 10.1109/SMI.2004.1314491 %D 2004 %B 2004 International Conference on Shape Modeling and Applications %Z date of event: 2004-06-07 - 2004-06-09 %C Genova, Italy %X In this paper, we develop an adaptive RBF fitting procedure <br>for a high quality approximation of a set of points scattered <br>over a piecewise smooth surface. We use compactly supported <br>RBFs whose centers are randomly chosen from the points. <br>The randomness is controlled by the point density and surface <br>geometry. For each RBF, its support size is chosen adaptively <br>according to surface geometry at a vicinity of the RBF center.<br>All these lead to a noise-robust high quality approximation of <br>the set. We also adapt our basic technique for shape<br>reconstruction from registered range scans by taking into <br>account measurement confidences. Finally, an interesting link<br>between our RBF fitting procedure and partition of unity<br>approximations is established and discussed. %B Shape Modeling International 2004 %E Giannini, Franca; Pasko, Alexander %P 31 - 39 %I IEEE %@ 0-7695-2075-8
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2004b. Ridge-valley Lines on Meshes via Implicit Surface Fitting. ACM Transactions on Graphics 23, 3.
Abstract
We propose a simple and effective method for detecting view- <br>and scale-independent ridge-valley lines defined via first- <br>and second-order curvature derivatives on shapes approximated <br>by dense triangle meshes. A high-quality estimation of <br>high-order surface derivatives is achieved by combining<br>multi-level implicit surface fitting and finite difference<br>approximations. We demonstrate that the ridges and valleys are<br>geometrically and perceptually salient surface features and,<br>therefore, can be potentially used for shape recognition, <br>coding, and quality evaluation purposes.
Export
BibTeX
@article{Ohtake-et-al_TG04, TITLE = {Ridge-valley Lines on Meshes via Implicit Surface Fitting}, AUTHOR = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1015706.1015768}, LOCALID = {Local-ID: C125675300671F7B-6F07B4989FD2DD0AC1256FAF00368F7F-Belyaev2004ridge}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {We propose a simple and effective method for detecting view- <br>and scale-independent ridge-valley lines defined via first- <br>and second-order curvature derivatives on shapes approximated <br>by dense triangle meshes. A high-quality estimation of <br>high-order surface derivatives is achieved by combining<br>multi-level implicit surface fitting and finite difference<br>approximations. We demonstrate that the ridges and valleys are<br>geometrically and perceptually salient surface features and,<br>therefore, can be potentially used for shape recognition, <br>coding, and quality evaluation purposes.}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {23}, NUMBER = {3}, PAGES = {609--612}, }
Endnote
%0 Journal Article %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Ridge-valley Lines on Meshes via Implicit Surface Fitting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B2D-3 %F EDOC: 231942 %F OTHER: Local-ID: C125675300671F7B-6F07B4989FD2DD0AC1256FAF00368F7F-Belyaev2004ridge %R 10.1145/1015706.1015768 %D 2004 %* Review method: peer-reviewed %X We propose a simple and effective method for detecting view- <br>and scale-independent ridge-valley lines defined via first- <br>and second-order curvature derivatives on shapes approximated <br>by dense triangle meshes. A high-quality estimation of <br>high-order surface derivatives is achieved by combining<br>multi-level implicit surface fitting and finite difference<br>approximations. We demonstrate that the ridges and valleys are<br>geometrically and perceptually salient surface features and,<br>therefore, can be potentially used for shape recognition, <br>coding, and quality evaluation purposes. %J ACM Transactions on Graphics %V 23 %N 3 %& 609 %P 609 - 612 %I Association for Computing Machinery %C New York, NY %@ false
Marmitt, G., Kleer, A., Friedrich, H., Wald, I., and Slusallek, P. 2004. Fast and Accurate Ray-Voxel Intersection Techniques for Iso-Surface Ray Tracing. Vision, Modeling, and Visualization 2004 (VMV-04) (VMV 2004), Akademische Verlagsgesellschaft Aka.
Abstract
Visualizing iso-surfaces of volumetric data sets is becoming increasingly important for many practical applications. One crucial task in iso-surface ray tracing is to find the correct intersection of a ray with the trilinear-interpolated implicit surface defined by the data values at the vertices of a given voxel. Currently available solutions are either accurate but slow or they provide fast but only approximate solutions. In this paper, we analyze the available techniques and present a new intersection algorithm. We compare and evaluate the new algorithm against previous approaches using both synthetic test cases and real world data sets. The new algorithm is roughly three times faster but provides the same image quality and better numerical stability as previous accurate solutions.
Export
BibTeX
@inproceedings{marmitt:04:IsoIsec, TITLE = {Fast and Accurate Ray-Voxel Intersection Techniques for Iso-Surface Ray Tracing}, AUTHOR = {Marmitt, Gerd and Kleer, Andreas and Friedrich, Heiko and Wald, Ingo and Slusallek, Philipp}, EDITOR = {Girod, Bernd and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-89838-058-0}, LOCALID = {Local-ID: C125675300671F7B-2FFF0E33D4E3EF68C1256F7100563528-marmitt:04:IsoIsec}, PUBLISHER = {Akademische Verlagsgesellschaft Aka}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Visualizing iso-surfaces of volumetric data sets is becoming increasingly important for many practical applications. One crucial task in iso-surface ray tracing is to find the correct intersection of a ray with the trilinear-interpolated implicit surface defined by the data values at the vertices of a given voxel. Currently available solutions are either accurate but slow or they provide fast but only approximate solutions. In this paper, we analyze the available techniques and present a new intersection algorithm. We compare and evaluate the new algorithm against previous approaches using both synthetic test cases and real world data sets. The new algorithm is roughly three times faster but provides the same image quality and better numerical stability as previous accurate solutions.}, BOOKTITLE = {Vision, Modeling, and Visualization 2004 (VMV-04) (VMV 2004)}, PAGES = {429--435}, ADDRESS = {Stanford, USA}, }
Endnote
%0 Conference Proceedings %A Marmitt, Gerd %A Kleer, Andreas %A Friedrich, Heiko %A Wald, Ingo %A Slusallek, Philipp %E Girod, Bernd %E Magnor, Marcus %E Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Discrete Optimization, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Accurate Ray-Voxel Intersection Techniques for Iso-Surface Ray Tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29C9-2 %F EDOC: 231344 %F OTHER: Local-ID: C125675300671F7B-2FFF0E33D4E3EF68C1256F7100563528-marmitt:04:IsoIsec %D 2004 %B 9th International Workshop on Vision Modeling and Visualization %Z date of event: 2004-11-16 - %C Stanford, USA %X Visualizing iso-surfaces of volumetric data sets is becoming increasingly important for many practical applications. One crucial task in iso-surface ray tracing is to find the correct intersection of a ray with the trilinear-interpolated implicit surface defined by the data values at the vertices of a given voxel. Currently available solutions are either accurate but slow or they provide fast but only approximate solutions. In this paper, we analyze the available techniques and present a new intersection algorithm. We compare and evaluate the new algorithm against previous approaches using both synthetic test cases and real world data sets. The new algorithm is roughly three times faster but provides the same image quality and better numerical stability as previous accurate solutions. %B Vision, Modeling, and Visualization 2004 (VMV-04) %P 429 - 435 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-058-0
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2004a. Visible Difference Predicator for High Dynamic Range Images. 2004 IEEE International Conference on Systems, Man & Cybernetics (SMC 2004), IEEE.
Abstract
Since new imaging and rendering systems commonly use physically<br>accurate lighting information in the form of High-Dynamic Range<br>data, there is a need for an automatic visual quality assessment of the <br>resulting images. In this work we extend the Visual Difference Predictor (VDP) <br>developed by Daly to handle HDR data. This let us predict if a human observer <br>is able to perceive differences for a pair of HDR images under the adaptation <br>conditions corresponding to the real scene observation.
Export
BibTeX
@inproceedings{Mantiuk2004HDRVDP, TITLE = {Visible Difference Predicator for High Dynamic Range Images}, AUTHOR = {Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-8567-5}, DOI = {10.1109/ICSMC.2004.1400750}, LOCALID = {Local-ID: C125675300671F7B-4A5E8413EEF67127C1256F330053216A-Mantiuk2004HDRVDP}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Since new imaging and rendering systems commonly use physically<br>accurate lighting information in the form of High-Dynamic Range<br>data, there is a need for an automatic visual quality assessment of the <br>resulting images. In this work we extend the Visual Difference Predictor (VDP) <br>developed by Daly to handle HDR data. This let us predict if a human observer <br>is able to perceive differences for a pair of HDR images under the adaptation <br>conditions corresponding to the real scene observation.}, BOOKTITLE = {2004 IEEE International Conference on Systems, Man \& Cybernetics (SMC 2004)}, EDITOR = {Thissen, Wil and Wieringa, Peter and Pantic, Maja and Ludema, Marcel}, PAGES = {2763--2769}, ADDRESS = {The Hague, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visible Difference Predicator for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B78-C %F EDOC: 231938 %R 10.1109/ICSMC.2004.1400750 %F OTHER: Local-ID: C125675300671F7B-4A5E8413EEF67127C1256F330053216A-Mantiuk2004HDRVDP %D 2004 %B 2004 IEEE International Conference on Systems, Man & Cybernetics %Z date of event: 2004-10-10 - 2004-10-13 %C The Hague, The Netherlands %X Since new imaging and rendering systems commonly use physically<br>accurate lighting information in the form of High-Dynamic Range<br>data, there is a need for an automatic visual quality assessment of the <br>resulting images. In this work we extend the Visual Difference Predictor (VDP) <br>developed by Daly to handle HDR data. This let us predict if a human observer <br>is able to perceive differences for a pair of HDR images under the adaptation <br>conditions corresponding to the real scene observation. %B 2004 IEEE International Conference on Systems, Man & Cybernetics %E Thissen, Wil; Wieringa, Peter; Pantic, Maja; Ludema, Marcel %P 2763 - 2769 %I IEEE %@ 0-7803-8567-5
Mantiuk, R., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2004b. Perception-motivated High Dynamic Range Video Encoding. ACM Transactions on Graphics 23, 3.
Abstract
Due to rapid technological progress in high dynamic range (HDR)<br> video capture and display, the efficient storage and<br> transmission of such data is crucial for the completeness of any HDR<br> imaging pipeline. We propose a new approach for<br> inter-frame encoding of HDR video, which is embedded in the<br> well-established MPEG-4 video compression standard. The key<br> component of our technique is luminance quantization <br> that is optimized for the contrast threshold perception in the<br> human visual system. The quantization<br> scheme requires only 10--11 bits to encode 12 orders of magnitude of<br> visible luminance range and does not lead to perceivable contouring<br> artifacts. Besides video encoding, the proposed quantization<br> provides perceptually-optimized luminance sampling for fast<br> implementation of any<br> global tone mapping operator using a lookup table.<br> To improve the quality of synthetic video sequences, we introduce<br> a coding scheme for discrete cosine transform (DCT) blocks with<br> high contrast. We demonstrate the capabilities of HDR video in<br> a player, which enables decoding, tone mapping, and applying<br> post-processing effects in real-time. The tone mapping algorithm as well<br> as its parameters can be changed interactively while the video is playing.<br> We can simulate post-processing<br> effects such as glare, night vision, and motion blur, which appear<br> very realistic due to the usage of HDR data.
Export
BibTeX
@article{Mantiuk-et-al_TG04, TITLE = {Perception-motivated High Dynamic Range Video Encoding}, AUTHOR = {Mantiuk, Rafal and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1015706.1015794}, LOCALID = {Local-ID: C125675300671F7B-2BA4C8B1EE81007BC1256EC1003757E0-Mantiuk2004HDREnc}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Due to rapid technological progress in high dynamic range (HDR)<br> video capture and display, the efficient storage and<br> transmission of such data is crucial for the completeness of any HDR<br> imaging pipeline. We propose a new approach for<br> inter-frame encoding of HDR video, which is embedded in the<br> well-established MPEG-4 video compression standard. The key<br> component of our technique is luminance quantization <br> that is optimized for the contrast threshold perception in the<br> human visual system. The quantization<br> scheme requires only 10--11 bits to encode 12 orders of magnitude of<br> visible luminance range and does not lead to perceivable contouring<br> artifacts. Besides video encoding, the proposed quantization<br> provides perceptually-optimized luminance sampling for fast<br> implementation of any<br> global tone mapping operator using a lookup table.<br> To improve the quality of synthetic video sequences, we introduce<br> a coding scheme for discrete cosine transform (DCT) blocks with<br> high contrast. We demonstrate the capabilities of HDR video in<br> a player, which enables decoding, tone mapping, and applying<br> post-processing effects in real-time. 
The tone mapping algorithm as well<br> as its parameters can be changed interactively while the video is playing.<br> We can simulate post-processing<br> effects such as glare, night vision, and motion blur, which appear<br> very realistic due to the usage of HDR data.}, JOURNAL = {ACM Transactions on Graphics}, EDITOR = {Marks, Joe}, VOLUME = {23}, NUMBER = {3}, PAGES = {733--741}, }
Endnote
%0 Journal Article %A Mantiuk, Rafal %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-motivated High Dynamic Range Video Encoding : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AFA-0 %F EDOC: 231948 %F OTHER: Local-ID: C125675300671F7B-2BA4C8B1EE81007BC1256EC1003757E0-Mantiuk2004HDREnc %R 10.1145/1015706.1015794 %D 2004 %* Review method: peer-reviewed %X Due to rapid technological progress in high dynamic range (HDR)<br> video capture and display, the efficient storage and<br> transmission of such data is crucial for the completeness of any HDR<br> imaging pipeline. We propose a new approach for<br> inter-frame encoding of HDR video, which is embedded in the<br> well-established MPEG-4 video compression standard. The key<br> component of our technique is luminance quantization <br> that is optimized for the contrast threshold perception in the<br> human visual system. The quantization<br> scheme requires only 10--11 bits to encode 12 orders of magnitude of<br> visible luminance range and does not lead to perceivable contouring<br> artifacts. Besides video encoding, the proposed quantization<br> provides perceptually-optimized luminance sampling for fast<br> implementation of any<br> global tone mapping operator using a lookup table.<br> To improve the quality of synthetic video sequences, we introduce<br> a coding scheme for discrete cosine transform (DCT) blocks with<br> high contrast. We demonstrate the capabilities of HDR video in<br> a player, which enables decoding, tone mapping, and applying<br> post-processing effects in real-time. 
The tone mapping algorithm as well<br> as its parameters can be changed interactively while the video is playing.<br> We can simulate post-processing<br> effects such as glare, night vision, and motion blur, which appear<br> very realistic due to the usage of HDR data. %J ACM Transactions on Graphics %V 23 %N 3 %& 733 %P 733 - 741 %I Association for Computing Machinery %C New York, NY %@ false
Lipman, Y., Sorkine, O., Cohen-Or, D., Levin, D., Rössl, C., and Seidel, H.-P. 2004. Differential Coordinates for Interactive Mesh Editing. Shape Modeling International 2004 (SMI 2004), IEEE.
Abstract
One of the main challenges in editing a mesh is to retain the visual appearance <br>of the surface after applying various modifications. In this paper we advocate <br>the use of linear differential coordinates as means to preserve the <br>high-frequency detail of the surface. The differential coordinates represent <br>the details and are defined by a linear transformation of the mesh vertices. <br>This allows the reconstruction of the edited surface by solving a linear system <br>that satisfies the reconstruction of the local details in least squares sense. <br>Since the differential coordinates are defined in a global coordinate system <br>they are not rotation-invariant. To compensate for that, we rotate them to <br>agree with the rotation of an approximated local frame. We show that the linear <br>least squares system can be solved fast enough to guarantee interactive <br>response time thanks to a precomputed factorization of the coefficient matrix. <br>We demonstrate that our approach enables to edit complex detailed meshes while <br>keeping the shape of the details in their natural orientation.
Export
BibTeX
@inproceedings{Lipman-et-al_SMI04, TITLE = {Differential Coordinates for Interactive Mesh Editing}, AUTHOR = {Lipman, Yaron and Sorkine, Olga and Cohen-Or, Daniel and Levin, David and R{\"o}ssl, Christian and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-1909-1}, DOI = {10.1109/SMI.2004.1314505}, LOCALID = {Local-ID: C125675300671F7B-9E569E3E56252F2FC1256E76002DDEAC-LSCLRS2004}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {One of the main challenges in editing a mesh is to retain the visual appearance <br>of the surface after applying various modifications. In this paper we advocate <br>the use of linear differential coordinates as means to preserve the <br>high-frequency detail of the surface. The differential coordinates represent <br>the details and are defined by a linear transformation of the mesh vertices. <br>This allows the reconstruction of the edited surface by solving a linear system <br>that satisfies the reconstruction of the local details in least squares sense. <br>Since the differential coordinates are defined in a global coordinate system <br>they are not rotation-invariant. To compensate for that, we rotate them to <br>agree with the rotation of an approximated local frame. We show that the linear <br>least squares system can be solved fast enough to guarantee interactive <br>response time thanks to a precomputed factorization of the coefficient matrix. <br>We demonstrate that our approach enables to edit complex detailed meshes while <br>keeping the shape of the details in their natural orientation.}, BOOKTITLE = {Shape Modeling International 2004 (SMI 2004)}, EDITOR = {Giannini, Franca and Pasko, Alexander}, PAGES = {181--190}, ADDRESS = {Genova, Italy}, }
Endnote
%0 Conference Proceedings %A Lipman, Yaron %A Sorkine, Olga %A Cohen-Or, Daniel %A Levin, David %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Differential Coordinates for Interactive Mesh Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29C3-E %F EDOC: 231338 %F OTHER: Local-ID: C125675300671F7B-9E569E3E56252F2FC1256E76002DDEAC-LSCLRS2004 %R 10.1109/SMI.2004.1314505 %D 2004 %B 2004 International Conference on Shape Modeling and Applications %Z date of event: 2004-06-07 - 2004-06-09 %C Genova, Italy %X One of the main challenges in editing a mesh is to retain the visual appearance <br>of the surface after applying various modifications. In this paper we advocate <br>the use of linear differential coordinates as means to preserve the <br>high-frequency detail of the surface. The differential coordinates represent <br>the details and are defined by a linear transformation of the mesh vertices. <br>This allows the reconstruction of the edited surface by solving a linear system <br>that satisfies the reconstruction of the local details in least squares sense. <br>Since the differential coordinates are defined in a global coordinate system <br>they are not rotation-invariant. To compensate for that, we rotate them to <br>agree with the rotation of an approximated local frame. We show that the linear <br>least squares system can be solved fast enough to guarantee interactive <br>response time thanks to a precomputed factorization of the coefficient matrix. <br>We demonstrate that our approach enables to edit complex detailed meshes while <br>keeping the shape of the details in their natural orientation. 
%B Shape Modeling International 2004 %E Giannini, Franca; Pasko, Alexander %P 181 - 190 %I IEEE %@ 0-7695-1909-1
Li, M., Magnor, M., and Seidel, H.-P. 2004a. Hardware-accelerated Rendering of Photo Hulls. Computer Graphics Forum (Proc. EUROGRAPHICS 2004), Blackwell.
Export
BibTeX
@inproceedings{Li-et-al_EUROGRAPHICS04, TITLE = {Hardware-accelerated Rendering of Photo Hulls}, AUTHOR = {Li, Ming and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2004.00795.x}, LOCALID = {Local-ID: C125675300671F7B-5A5B2363CACB9CF3C1256E9E0053DFAD-Li2004HAPH}, PUBLISHER = {Blackwell}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {The European Association for Computer Graphics 25th Annual Conference (EUROGRAPHICS 2004)}, EDITOR = {Cani, Marie-Paule and Slater, Mel}, PAGES = {635--642}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {23}, ISSUE = {3}, ADDRESS = {Grenoble, France}, }
Endnote
%0 Conference Proceedings %A Li, Ming %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hardware-accelerated Rendering of Photo Hulls : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BAF-1 %F EDOC: 241615 %F OTHER: Local-ID: C125675300671F7B-5A5B2363CACB9CF3C1256E9E0053DFAD-Li2004HAPH %R 10.1111/j.1467-8659.2004.00795.x %D 2004 %B The European Association for Computer Graphics 25th Annual Conference %Z date of event: 2004-08-30 - %C Grenoble, France %B The European Association for Computer Graphics 25th Annual Conference %E Cani, Marie-Paule; Slater, Mel %P 635 - 642 %I Blackwell %J Computer Graphics Forum %V 23 %N 3 %I Blackwell-Wiley %@ false
Li, M., Magnor, M., and Seidel, H.-P. 2004b. A Hybrid Hardware-accelerated Algorithm for High Quality Rendering of Visual Hulls. Proceedings of Graphics Interface 2004 Conference (GI 2004), Canadian Human-Computer Communications Society.
Export
BibTeX
@inproceedings{Li-et-al_GI04, TITLE = {A Hybrid Hardware-accelerated Algorithm for High Quality Rendering of Visual Hulls}, AUTHOR = {Li, Ming and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-56881-227-4}, LOCALID = {Local-ID: C1256BDE005F57A8-7700D233A227872DC1256F3A0037AEB0-Li2004:HHAA}, PUBLISHER = {Canadian Human-Computer Communications Society}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {Proceedings of Graphics Interface 2004 Conference (GI 2004)}, EDITOR = {Heidrich, Wolfgang and Balakrishnan, Ravin}, PAGES = {41--48}, ADDRESS = {London, Canada}, }
Endnote
%0 Conference Proceedings %A Li, Ming %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Hybrid Hardware-accelerated Algorithm for High Quality Rendering of Visual Hulls : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A0F-F %F EDOC: 231817 %F OTHER: Local-ID: C1256BDE005F57A8-7700D233A227872DC1256F3A0037AEB0-Li2004:HHAA %D 2004 %B Graphics Interface 2004 %Z date of event: 2004-05-17 - 2004-05-19 %C London, Canada %B Proceedings of Graphics Interface 2004 Conference %E Heidrich, Wolfgang; Balakrishnan, Ravin %P 41 - 48 %I Canadian Human-Computer Communications Society %@ 978-1-56881-227-4
Lee, Y., Lee, S., Shamir, A., Cohen-Or, D., and Seidel, H.-P. 2004. Intelligent Mesh Scissoring Using 3D Snakes. Proceedings of the 12th Pacific Conference on Computer Graphics and Applications (PG 2004), IEEE.
Export
BibTeX
@inproceedings{Lee-et-al_PG04, TITLE = {Intelligent Mesh Scissoring Using {3D} Snakes}, AUTHOR = {Lee, Yunjin and Lee, Seungyong and Shamir, Ariel and Cohen-Or, Daniel and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2234-3}, DOI = {10.1109/PCCGA.2004.1348358}, LOCALID = {Local-ID: C125675300671F7B-74ACADB4F850911DC1256FB6004F5B52-LeeMeshScissoring04}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {Proceedings of the 12th Pacific Conference on Computer Graphics and Applications (PG 2004)}, EDITOR = {Cohen-Or, Daniel and Ko, Hyeong-Seok and Terzopoulos, Demetri and Warren, Joe}, PAGES = {279--287}, ADDRESS = {Seoul, South Korea}, }
Endnote
%0 Conference Proceedings %A Lee, Yunjin %A Lee, Seungyong %A Shamir, Ariel %A Cohen-Or, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Intelligent Mesh Scissoring Using 3D Snakes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AC9-D %F EDOC: 231952 %F OTHER: Local-ID: C125675300671F7B-74ACADB4F850911DC1256FB6004F5B52-LeeMeshScissoring04 %R 10.1109/PCCGA.2004.1348358 %D 2004 %B 12th Pacific Conference on Computer Graphics and Applications %Z date of event: 2004-10-06 - 2004-10-08 %C Seoul, South Korea %B Proceedings of the 12th Pacific Conference on Computer Graphics and Applications %E Cohen-Or, Daniel; Ko, Hyeong-Seok; Terzopoulos, Demetri; Warren, Joe %P 279 - 287 %I IEEE %@ 0-7695-2234-3
Krislock, N., Lang, J., Varah, J., Pai, D.K., and Seidel, H.-P. 2004. Local Compliance Estimation via Positive Semidefinite Constrained Least Squares. IEEE Transactions on Robotics 20, 6.
Export
BibTeX
@article{Krislock-et-al_TR04, TITLE = {Local Compliance Estimation via Positive Semidefinite Constrained Least Squares}, AUTHOR = {Krislock, Nathan and Lang, Jochen and Varah, Jim and Pai, Dinesh K. and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1042-296X}, DOI = {10.1109/TRO.2004.832794}, LOCALID = {Local-ID: C125675300671F7B-3DBA912A504642B3C1256FC200345290-Kris04}, PUBLISHER = {IEEE}, ADDRESS = {New York, NY}, YEAR = {2004}, DATE = {2004}, JOURNAL = {IEEE Transactions on Robotics}, VOLUME = {20}, NUMBER = {6}, PAGES = {1007--1011}, }
Endnote
%0 Journal Article %A Krislock, Nathan %A Lang, Jochen %A Varah, Jim %A Pai, Dinesh K. %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Local Compliance Estimation via Positive Semidefinite Constrained Least Squares : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AD4-4 %F EDOC: 231954 %F OTHER: Local-ID: C125675300671F7B-3DBA912A504642B3C1256FC200345290-Kris04 %R 10.1109/TRO.2004.832794 %D 2004 %* Review method: peer-reviewed %J IEEE Transactions on Robotics %O IEEE Transactions on Robotics and Automation %V 20 %N 6 %& 1007 %P 1007 - 1011 %I IEEE %C New York, NY %@ false
Krawczyk, G., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2004. Lightness Perception Inspired Tone Mapping. Proceedings APGV 2004, ACM.
Export
BibTeX
@inproceedings{Krawczyk2004,
  TITLE     = {Lightness Perception Inspired Tone Mapping},
  AUTHOR    = {Krawczyk, Grzegorz and Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-914-3},
  DOI       = {10.1145/1012551.1012594},
  LOCALID   = {Local-ID: C125675300671F7B-07985C48329EC4DFC1256FC4002A5333-Krawczyk2004},
  PUBLISHER = {ACM},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings APGV 2004},
  EDITOR    = {Spencer, Stephen N.},
  PAGES     = {172--172},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lightness Perception Inspired Tone Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2543-E %F EDOC: 231335 %R 10.1145/1012551.1012594 %F OTHER: Local-ID: C125675300671F7B-07985C48329EC4DFC1256FC4002A5333-Krawczyk2004 %D 2004 %B 1st Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2004-08-07 - 2004-08-08 %C Los Angeles, CA, USA %B Proceedings APGV 2004 %E Spencer, Stephen N. %P 172 - 172 %I ACM %@ 978-1-58113-914-3
Koster, M., Haber, J., and Seidel, H.-P. 2004. Real-time Rendering of Human Hair using Programmable Graphics Hardware. Proceedings of the 2004 Computer Graphics International Conference (CGI 2004), IEEE.
Abstract
We present a hair model together with rendering algorithms suitable for <br>real-time rendering. In our approach, we take into account the major lighting <br>factors contributing to a realistic appearance of human hair: anisotropic <br>reflection and self-shadowing. To deal with the geometric complexity of human <br>hair, we combine single hair fibers into hair wisps, which are represented by <br>textured triangle strips. Our rendering algorithms<br>use OpenGL extensions to achieve real-time performance on recent commodity <br>graphics boards. We demonstrate the applicability of our hair model for a <br>variety of different hairstyles.
Export
BibTeX
@inproceedings{Koster-et-al_CGI04,
  TITLE     = {Real-time Rendering of Human Hair using Programmable Graphics Hardware},
  AUTHOR    = {Koster, Martin and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2171-1},
  DOI       = {10.1109/CGI.2004.1309217},
  LOCALID   = {Local-ID: C125675300671F7B-77A50156C42136EDC1256E3E00448E2F-Koster:RTRHH},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {We present a hair model together with rendering algorithms suitable for real-time rendering. In our approach, we take into account the major lighting factors contributing to a realistic appearance of human hair: anisotropic reflection and self-shadowing. To deal with the geometric complexity of human hair, we combine single hair fibers into hair wisps, which are represented by textured triangle strips. Our rendering algorithms use OpenGL extensions to achieve real-time performance on recent commodity graphics boards. We demonstrate the applicability of our hair model for a variety of different hairstyles.},
  BOOKTITLE = {Proceedings of the 2004 Computer Graphics International Conference (CGI 2004)},
  EDITOR    = {Cohen-Or, Daniel and Jain, Lakhmi and Magnenat-Thalmann, Nadia},
  PAGES     = {248--256},
  ADDRESS   = {Crete, Greece},
}
Endnote
%0 Conference Proceedings %A Koster, Martin %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Rendering of Human Hair using Programmable Graphics Hardware : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B15-8 %F EDOC: 231907 %F OTHER: Local-ID: C125675300671F7B-77A50156C42136EDC1256E3E00448E2F-Koster:RTRHH %R 10.1109/CGI.2004.1309217 %D 2004 %B Computer Graphics International 2004 %Z date of event: 2004-06-16 - 2004-06-19 %C Crete, Greece %X We present a hair model together with rendering algorithms suitable for <br>real-time rendering. In our approach, we take into account the major lighting <br>factors contributing to a realistic appearance of human hair: anisotropic <br>reflection and self-shadowing. To deal with the geometric complexity of human <br>hair, we combine single hair fibers into hair wisps, which are represented by <br>textured triangle strips. Our rendering algorithms<br>use OpenGL extensions to achieve real-time performance on recent commodity <br>graphics boards. We demonstrate the applicability of our hair model for a <br>variety of different hairstyles. %B Proceedings of the 2004 Computer Graphics International Conference %E Cohen-Or, Daniel; Jain, Lakhmi; Magnenat-Thalmann, Nadia %P 248 - 256 %I IEEE %@ 0-7695-2171-1
Kondratieva, P., Havran, V., and Seidel, H.-P. 2004. Effective Use of Procedural Shaders in Animated Scenes. Computational Science — ICCS 2004, Springer.
Export
BibTeX
@inproceedings{Kondratieva-et-al_ICCS04,
  TITLE     = {Effective Use of Procedural Shaders in Animated Scenes},
  AUTHOR    = {Kondratieva, Polina and Havran, Vlastimil and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0302-9743},
  ISBN      = {3-540-22129-8},
  LOCALID   = {Local-ID: C125675300671F7B-8570F665939C9085C1256F7F00443FF2-Kondratieva2004},
  PUBLISHER = {Springer},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Computational Science --- ICCS 2004},
  EDITOR    = {Bubak, Marian and van Albada, Geert Dick and Sloot, Peter M. A. and Dongarra, Jack J.},
  PAGES     = {164--172},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {3039},
  ADDRESS   = {Krak{\'o}w, Poland},
}
Endnote
%0 Conference Proceedings %A Kondratieva, Polina %A Havran, Vlastimil %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Effective Use of Procedural Shaders in Animated Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A7D-8 %F EDOC: 231955 %F OTHER: Local-ID: C125675300671F7B-8570F665939C9085C1256F7F00443FF2-Kondratieva2004 %D 2004 %B 4th International Conference on Computational Science %Z date of event: 2004-06-06 - 2004-06-09 %C Krak&#243;w, Poland %B Computational Science &#8212; ICCS 2004 %E Bubak, Marian; van Albada, Geert Dick; Sloot, Peter M. A.; Dongarra, Jack J. %P 164 - 172 %I Springer %@ 3-540-22129-8 %B Lecture Notes in Computer Science %N 3039 %@ false %U https://rdcu.be/dEy8d
Kautz, J., Sattler, M., Sarlette, R., Klein, R., and Seidel, H.-P. 2004a. Decoupling BRDFs from Surface Mesostructures. Proceedings of the Graphics Interface 2004 Conference (GI 2004), Canadian Human-Computer Communications Society.
Export
BibTeX
@inproceedings{DBLP:conf/graphicsinterface/KautzSSKS04,
  TITLE     = {Decoupling {BRDFs} from Surface Mesostructures},
  AUTHOR    = {Kautz, Jan and Sattler, Mirko and Sarlette, Ralf and Klein, Reinhard and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-56881-227-4},
  DOI       = {10.5555/1006058.1006080},
  PUBLISHER = {Canadian Human-Computer Communications Society},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings of the Graphics Interface 2004 Conference (GI 2004)},
  EDITOR    = {Heidrich, Wolfgang and Balakrishnan, Ravin},
  PAGES     = {177--184},
  ADDRESS   = {London, Ontario, Canada},
}
Endnote
%0 Conference Proceedings %A Kautz, Jan %A Sattler, Mirko %A Sarlette, Ralf %A Klein, Reinhard %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Decoupling BRDFs from Surface Mesostructures : %G eng %U http://hdl.handle.net/21.11116/0000-000F-2726-B %R 10.5555/1006058.1006080 %D 2004 %B Graphics Interface 2004 %Z date of event: 2004-05-17 - 2004-05-19 %C London, Ontario, Canada %B Proceedings of the Graphics Interface 2004 Conference %E Heidrich, Wolfgang; Balakrishnan, Ravin %P 177 - 184 %I Canadian Human-Computer Communications Society %@ 978-1-56881-227-4
Kautz, J., Daubert, K., and Seidel, H.-P. 2004b. Advanced Environment Mapping in VR Applications. Computers and Graphics 28, 1.
Export
BibTeX
@article{DBLP:journals/cg/KautzDS04,
  TITLE     = {Advanced Environment Mapping in {VR} Applications},
  AUTHOR    = {Kautz, Jan and Daubert, Katja and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2003.10.010},
  PUBLISHER = {Pergamon},
  ADDRESS   = {New York},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {Computers and Graphics},
  VOLUME    = {28},
  NUMBER    = {1},
  PAGES     = {99--104},
}
Endnote
%0 Journal Article %A Kautz, Jan %A Daubert, Katja %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Advanced Environment Mapping in VR Applications : %G eng %U http://hdl.handle.net/21.11116/0000-000F-293A-3 %R 10.1016/J.CAG.2003.10.010 %D 2004 %J Computers and Graphics %V 28 %N 1 %& 99 %P 99 - 104 %I Pergamon %C New York %@ false
Ivrissimtzis, I., Zayer, R., and Seidel, H.-P. 2004a. Polygonal Decomposition of the 1-Ring Neighborhood of the Catmull-Clark Scheme. Shape Modeling International 2004 (SMI 2004), IEEE.
Export
BibTeX
@inproceedings{Ivrissimtzis-et-al_SMI04,
  TITLE     = {Polygonal Decomposition of the 1-Ring Neighborhood of the Catmull-Clark Scheme},
  AUTHOR    = {Ivrissimtzis, Ioannis and Zayer, Rhaleb and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2075-8},
  DOI       = {10.1109/SMI.2004.1314497},
  LOCALID   = {Local-ID: C125675300671F7B-F2A809A6970CE057C1256E7A0059D89A-izs2004a},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Shape Modeling International 2004 (SMI 2004)},
  EDITOR    = {Giannini, Franca and Pasko, Alexander},
  PAGES     = {101--109},
  ADDRESS   = {Genoa, Italy},
}
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Polygonal Decomposition of the 1-Ring Neighborhood of the Catmull-Clark Scheme : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AFC-C %F EDOC: 231908 %F OTHER: Local-ID: C125675300671F7B-F2A809A6970CE057C1256E7A0059D89A-izs2004a %R 10.1109/SMI.2004.1314497 %D 2004 %B 2004 International Conference on Shape Modeling and Applications %Z date of event: 2004-06-07 - 2004-06-09 %C Genoa, Italy %B Shape Modeling International 2004 %E Giannini, Franca; Pasko, Alexander %P 101 - 109 %I IEEE %@ 0-7695-2075-8
Ivrissimtzis, I., Jeong, W.-K., Lee, S., Lee, Y., and Seidel, H.-P. 2004b. Neural meshes: surface reconstruction with a learning algorithm. Max-Planck-Institut für Informatik, Saarbrücken.
Export
BibTeX
@techreport{Ivrissimtzis-et-al_MPI-TR04,
  TITLE       = {Neural meshes: surface reconstruction with a learning algorithm},
  AUTHOR      = {Ivrissimtzis, Ioannis and Jeong, Won-Ki and Lee, Seungyong and Lee, Yunjin and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2004-4-005},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2004},
  DATE        = {2004-10},
  TYPE        = {Research Report},
}
Endnote
%0 Report %A Ivrissimtzis, Ioannis %A Jeong, Won-Ki %A Lee, Seungyong %A Lee, Yunjin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Neural meshes: surface reconstruction with a learning algorithm : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-28C9-A %F EDOC: 237862 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2004 %P 16 p. %B Research Report
Ivrissimtzis, I. and Seidel, H.-P. 2004. Evolutions of Polygons in the Study of Subdivision Surfaces. Computing 72.
Export
BibTeX
@article{Ivrissimtzis-Seidel_Computing04,
  TITLE     = {Evolutions of Polygons in the Study of Subdivision Surfaces},
  AUTHOR    = {Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0010-485X},
  DOI       = {10.1007/s00607-003-0049-8},
  LOCALID   = {Local-ID: C125675300671F7B-61C4523A9E42E525C1256E7A0056B092-is2004a},
  PUBLISHER = {Springer},
  ADDRESS   = {Berlin},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {Computing},
  VOLUME    = {72},
  PAGES     = {93--103},
}
Endnote
%0 Journal Article %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Evolutions of Polygons in the Study of Subdivision Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A8D-6 %F EDOC: 231958 %F OTHER: Local-ID: C125675300671F7B-61C4523A9E42E525C1256E7A0056B092-is2004a %R 10.1007/s00607-003-0049-8 %D 2004 %* Review method: peer-reviewed %J Computing %V 72 %& 93 %P 93 - 103 %I Springer %C Berlin %@ false %U https://rdcu.be/dET0k
Ivrissimtzis, I., Lee, Y., Lee, S., Jeong, W.-K., and Seidel, H.-P. 2004c. Neural Mesh Ensembles. Proceedings of the 2nd International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT 2004), IEEE.
Export
BibTeX
@inproceedings{Ivrissimtzis-et-al_3DPVT04,
  TITLE     = {Neural Mesh Ensembles},
  AUTHOR    = {Ivrissimtzis, Ioannis and Lee, Yunjin and Lee, Seungyong and Jeong, Won-Ki and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2223-8},
  DOI       = {10.1109/TDPVT.2004.1335216},
  LOCALID   = {Local-ID: C125675300671F7B-FE345AF1343CDBBAC1256F2800476B79-illjs04},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings of the 2nd International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT 2004)},
  EDITOR    = {Aloimonos, Yannis and Taubin, Gabriel},
  PAGES     = {308--315},
  ADDRESS   = {Thessaloniki, Greece},
}
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Lee, Yunjin %A Lee, Seungyong %A Jeong, Won-Ki %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Mesh Ensembles : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AE1-6 %F EDOC: 231960 %F OTHER: Local-ID: C125675300671F7B-FE345AF1343CDBBAC1256F2800476B79-illjs04 %R 10.1109/TDPVT.2004.1335216 %D 2004 %B 2nd International Symposium on 3D Data Processing, Visualization, and Transmission %Z date of event: 2004-09-06 - 2004-09-09 %C Thessaloniki, Greece %B Proceedings of the 2nd International Symposium on 3D Data Processing, Visualization, and Transmission %E Aloimonos, Yannis; Taubin, Gabriel %P 308 - 315 %I IEEE %@ 0-7695-2223-8
Havran, V., Bittner, J., and Seidel, H.-P. 2004. Ray Maps for Global Illumination. ACM SIGGRAPH 2004 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/HavranBS04,
  TITLE     = {Ray Maps for Global Illumination},
  AUTHOR    = {Havran, Vlastimil and Bittner, Jiri and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-896-2},
  DOI       = {10.1145/1186223.1186319},
  PUBLISHER = {ACM},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {ACM SIGGRAPH 2004 Sketches},
  EDITOR    = {Barzel, Ronen},
  PAGES     = {77},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Bittner, Jiri %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Ray Maps for Global Illumination : %G eng %U http://hdl.handle.net/21.11116/0000-000F-1D99-5 %R 10.1145/1186223.1186319 %D 2004 %B 2004 International Conference on Computer Graphics and Interactive Techniques %Z date of event: 2004-08-08 - 2004-08-12 %C Los Angeles, CA, USA %B ACM SIGGRAPH 2004 Sketches %E Barzel, Ronen %P 77 %I ACM %@ 978-1-58113-896-2
Hangelbroek, T., Nürnberger, G., Rössl, C., Seidel, H.-P., and Zeilfelder, F. 2004. Dimension of C1-Splines on Type-6 Tetrahedral Partitions. Journal of Approximation Theory 131, 2.
Export
BibTeX
@article{HNRSZ:2004,
  TITLE     = {Dimension of {$C^1$}-Splines on Type-6 Tetrahedral Partitions},
  AUTHOR    = {Hangelbroek, Thomas and N{\"u}rnberger, G{\"u}nther and R{\"o}ssl, Christian and Seidel, Hans-Peter and Zeilfelder, Frank},
  LANGUAGE  = {eng},
  ISSN      = {0021-9045},
  DOI       = {10.1016/j.jat.2004.09.002},
  PUBLISHER = {Academic Press},
  ADDRESS   = {Orlando, Fla.},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {Journal of Approximation Theory},
  VOLUME    = {131},
  NUMBER    = {2},
  PAGES     = {157--184},
}
Endnote
%0 Journal Article %A Hangelbroek, Thomas %A N&#252;rnberger, G&#252;nther %A R&#246;ssl, Christian %A Seidel, Hans-Peter %A Zeilfelder, Frank %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Dimension of C1-Splines on Type-6 Tetrahedral Partitions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EA93-4 %R 10.1016/j.jat.2004.09.002 %7 2004-12-08 %D 2004 %J Journal of Approximation Theory %V 131 %N 2 %& 157 %P 157 - 184 %I Academic Press %C Orlando, Fla. %@ false %U https://doi.org/10.1016/j.jat.2004.09.002
Haber, J., Schmitt, C., Koster, M., and Seidel, H.-P. 2004. Modeling hair using a wisp hair model. Max-Planck-Institut für Informatik, Saarbrücken.
Export
BibTeX
@techreport{Haber-et-al_MPI-TR04,
  TITLE       = {Modeling hair using a wisp hair model},
  AUTHOR      = {Haber, J{\"o}rg and Schmitt, Carina and Koster, Martin and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2004-4-001},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2004},
  DATE        = {2004-05},
  TYPE        = {Research Report},
}
Endnote
%0 Report %A Haber, J&#246;rg %A Schmitt, Carina %A Koster, Martin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling hair using a wisp hair model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-28F6-4 %F EDOC: 237864 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2004 %P 38 p. %B Max-Planck-Institut f&#252;r Informatik <Saarbr&#252;cken>: Research Report
Gumhold, S. 2004. Hierarchical Shape-Adaptive Quantization for Geometry Compression. Vision, modeling, and visualization 2004 (VMV-04), Akademische Verlagsgesellschaft Aka.
Abstract
The compression of polygonal mesh geometry is still an active field of research as in 3d no theoretical bounds are known. This work proposes a geometry coding method based on predictive coding. Instead of using the vertex to vertex distance as distortion measurement, an approximation to the Hausdorffdistance is used resulting in additional degrees of freedom. These are exploited by a new adaptive quantization approach, which is independent of the encoding order. The achieved compression rates are similar to those of entropy based optimization but with a significantly faster compression performance.
Export
BibTeX
@inproceedings{Gumhold2004,
  TITLE     = {Hierarchical Shape-Adaptive Quantization for Geometry Compression},
  AUTHOR    = {Gumhold, Stefan},
  EDITOR    = {Girod, Bernd and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-058-0},
  LOCALID   = {Local-ID: C125675300671F7B-D79543C9FC6FF6D4C1256F7000335C88-Gumhold2004},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {The compression of polygonal mesh geometry is still an active field of research as in 3d no theoretical bounds are known. This work proposes a geometry coding method based on predictive coding. Instead of using the vertex to vertex distance as distortion measurement, an approximation to the Hausdorffdistance is used resulting in additional degrees of freedom. These are exploited by a new adaptive quantization approach, which is independent of the encoding order. The achieved compression rates are similar to those of entropy based optimization but with a significantly faster compression performance.},
  BOOKTITLE = {Vision, modeling, and visualization 2004 (VMV-04)},
  PAGES     = {293--298},
}
Endnote
%0 Conference Proceedings %A Gumhold, Stefan %E Girod, Bernd %E Magnor, Marcus %E Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Shape-Adaptive Quantization for Geometry Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AB3-E %F EDOC: 231963 %F OTHER: Local-ID: C125675300671F7B-D79543C9FC6FF6D4C1256F7000335C88-Gumhold2004 %I Akademische Verlagsgesellschaft Aka %D 2004 %B Untitled Event %Z date of event: 2004-11-16 - %C Stanford, USA %X The compression of polygonal mesh geometry is still an active field of research as in 3d no theoretical bounds are known. This work proposes a geometry coding method based on predictive coding. Instead of using the vertex to vertex distance as distortion measurement, an approximation to the Hausdorffdistance is used resulting in additional degrees of freedom. These are exploited by a new adaptive quantization approach, which is independent of the encoding order. The achieved compression rates are similar to those of entropy based optimization but with a significantly faster compression performance. %B Vision, modeling, and visualization 2004 (VMV-04) %P 293 - 298 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-058-0
Goesele, M., Lensch, H.P.A., and Seidel, H.-P. 2004a. Validation of Color Managed 3D Appearance Acquisition. Color science and engineering: Systems, technologies, applications, IS&T.
Abstract
Image-based appearance acquisition algorithms are<br>able to generate realistic 3D models of real objects but<br>have previously not taken care of calibrated color space.<br>We integrate a color managed high-dynamic range imaging<br>technique into a recent appearance acquisition algorithm<br>and generate models in CIE XYZ color space. We<br>compare the final models with spectrophotometric measurements<br>and compute difference images between renderings<br>and ground truth images. Displayed renderings and<br>printouts are compared to the original objects under identical<br>illumination conditions to evaluate and validate the<br>complete appearance reproduction pipeline. Working in<br>CIE XYZ color space allows for expressing the perceivable<br>differences in a standardized measure.
Export
BibTeX
@inproceedings{Goesele-et-al_IST/SID04,
  TITLE     = {Validation of Color Managed {3D} Appearance Acquisition},
  AUTHOR    = {Goesele, Michael and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-89208-254-2},
  DOI       = {10.2352/CIC.2004.12.1.art00047},
  LOCALID   = {Local-ID: C125675300671F7B-46DC33A68CDB6034C1256F05006AC75D-Goesele:2004:VOC},
  PUBLISHER = {IS\&T},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Image-based appearance acquisition algorithms are able to generate realistic 3D models of real objects but have previously not taken care of calibrated color space. We integrate a color managed high-dynamic range imaging technique into a recent appearance acquisition algorithm and generate models in CIE XYZ color space. We compare the final models with spectrophotometric measurements and compute difference images between renderings and ground truth images. Displayed renderings and printouts are compared to the original objects under identical illumination conditions to evaluate and validate the complete appearance reproduction pipeline. Working in CIE XYZ color space allows for expressing the perceivable differences in a standardized measure.},
  BOOKTITLE = {Color science and engineering: Systems, technologies, applications},
  PAGES     = {265--270},
  ADDRESS   = {Scottsdale, Arizona, USA},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Validation of Color Managed 3D Appearance Acquisition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29BB-2 %F EDOC: 231322 %F OTHER: Local-ID: C125675300671F7B-46DC33A68CDB6034C1256F05006AC75D-Goesele:2004:VOC %R 10.2352/CIC.2004.12.1.art00047 %D 2004 %B 2th Color and Imaging Conference %Z date of event: 2004-11-09 - 2004-11-12 %C Scottsdale, Arizona, USA %X Image-based appearance acquisition algorithms are<br>able to generate realistic 3D models of real objects but<br>have previously not taken care of calibrated color space.<br>We integrate a color managed high-dynamic range imaging<br>technique into a recent appearance acquisition algorithm<br>and generate models in CIE XYZ color space. We<br>compare the final models with spectrophotometric measurements<br>and compute difference images between renderings<br>and ground truth images. Displayed renderings and<br>printouts are compared to the original objects under identical<br>illumination conditions to evaluate and validate the<br>complete appearance reproduction pipeline. Working in<br>CIE XYZ color space allows for expressing the perceivable<br>differences in a standardized measure. %B Color science and engineering: Systems, technologies, applications %P 265 - 270 %I IS&T %@ 0-89208-254-2
Goesele, M., Lensch, H.P.A., Lang, J., Fuchs, C., and Seidel, H.-P. 2004b. DISCO - Acquisition of Translucent Objects. ACM Transactions on Graphics 23, 3.
Abstract
Translucent objects are characterized by diffuse light scattering<br>beneath the object's surface. Light enters and leaves an object at<br>possibly distinct surface locations. This paper presents the first<br>method to acquire this transport behavior for arbitrary<br>inhomogeneous objects. Individual surface points are illuminated in<br>our DISCO measurement facility and the object's impulse response is<br>recorded with a high-dynamic range video camera. The acquired data<br>is resampled into a hierarchical model of the object's light<br>scattering properties. Missing values are consistently interpolated<br>resulting in measurement-based, complete and accurate<br>representations of real translucent objects which can be rendered<br>with various algorithms.
Export
BibTeX
@article{Goesele-et-al_TG04,
  TITLE     = {{DISCO} -- Acquisition of Translucent Objects},
  AUTHOR    = {Goesele, Michael and Lensch, Hendrik P. A. and Lang, Jochen and Fuchs, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1015706.1015807},
  LOCALID   = {Local-ID: C125675300671F7B-8859179F7A701C6BC1256E6E0070994A-Goesele:2004:DAT},
  PUBLISHER = {Association for Computing Machinery},
  ADDRESS   = {New York, NY},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Translucent objects are characterized by diffuse light scattering beneath the object's surface. Light enters and leaves an object at possibly distinct surface locations. This paper presents the first method to acquire this transport behavior for arbitrary inhomogeneous objects. Individual surface points are illuminated in our DISCO measurement facility and the object's impulse response is recorded with a high-dynamic range video camera. The acquired data is resampled into a hierarchical model of the object's light scattering properties. Missing values are consistently interpolated resulting in measurement-based, complete and accurate representations of real translucent objects which can be rendered with various algorithms.},
  JOURNAL   = {ACM Transactions on Graphics},
  EDITOR    = {Marks, Joe},
  VOLUME    = {23},
  NUMBER    = {3},
  PAGES     = {835--844},
}
Endnote
%0 Journal Article %A Goesele, Michael %A Lensch, Hendrik P. A. %A Lang, Jochen %A Fuchs, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T DISCO - Acquisition of Translucent Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29B9-6 %F EDOC: 231321 %F OTHER: Local-ID: C125675300671F7B-8859179F7A701C6BC1256E6E0070994A-Goesele:2004:DAT %R 10.1145/1015706.1015807 %D 2004 %* Review method: peer-reviewed %X Translucent objects are characterized by diffuse light scattering<br>beneath the object's surface. Light enters and leaves an object at<br>possibly distinct surface locations. This paper presents the first<br>method to acquire this transport behavior for arbitrary<br>inhomogeneous objects. Individual surface points are illuminated in<br>our DISCO measurement facility and the object's impulse response is<br>recorded with a high-dynamic range video camera. The acquired data<br>is resampled into a hierarchical model of the object's light<br>scattering properties. Missing values are consistently interpolated<br>resulting in measurement-based, complete and accurate<br>representations of real translucent objects which can be rendered<br>with various algorithms. %J ACM Transactions on Graphics %V 23 %N 3 %& 835 %P 835 - 844 %I Association for Computing Machinery %C New York, NY %@ false
Girod, B., Magnor, M.A., and Seidel, H.-P., eds. 2004. Vision, Modeling, and Visualization 2004. Akademische Verlagsgsellschaft Aka GmbH.
Export
BibTeX
@proceedings{DBLP:conf/vmv/2004,
  TITLE     = {Vision, Modeling, and Visualization 2004},
  EDITOR    = {Girod, Bernd and Magnor, Marcus A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-058-0},
  PUBLISHER = {Akademische Verlagsgsellschaft Aka GmbH},
  YEAR      = {2004},
  DATE      = {2004},
  ADDRESS   = {Stanford, CA, USA},
}
Endnote
%0 Conference Proceedings %E Girod, Bernd %E Magnor, Marcus A. %E Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Vision, Modeling, and Visualization 2004 : %G eng %U http://hdl.handle.net/21.11116/0000-000F-1393-5 %@ 3-89838-058-0 %I Akademische Verlagsgsellschaft Aka GmbH %D 2004 %B 9th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2004-11-16 - 2004-11-18 %D 2004 %C Stanford, CA, USA
Fuchs, T., Haber, J., and Seidel, H.-P. 2004. MIMIC - A Language for Specifying Facial Animations. Journal of WSCG (Proc. WSCG 2004), UNION Agency.
Abstract
This paper introduces a versatile language for specifying facial<br>animations. The language MIMIC can be used together with any facial animation <br>system that employs animation parameters varying over time to control the <br>animation. In addition to the automatic alignment of individual actions, the <br>user can fine-tune the temporal alignment of actions relatively to each other. <br>A set of pre-defined functions can be used to control oscillatory behavior of <br>actions. Temporal constraints are resolved automatically by the MIMIC compiler. <br>We describe the grammar of MIMIC, give some hints on the implementation of the <br>MIMIC compiler, and show some examples of animation code together with <br>snapshots from the resulting animation.
Export
BibTeX
@inproceedings{Fuchs-et-al_WSCG04,
  TITLE     = {{MIMIC} -- A Language for Specifying Facial Animations},
  AUTHOR    = {Fuchs, Thomas and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1213-6972},
  ISBN      = {80-903100-5-2},
  LOCALID   = {Local-ID: C125675300671F7B-8F80D17E6FBE0E93C1256E3E004405AE-Fuchs:Mimic},
  PUBLISHER = {UNION Agency},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {This paper introduces a versatile language for specifying facial<br>animations. The language MIMIC can be used together with any facial animation <br>system that employs animation parameters varying over time to control the <br>animation. In addition to the automatic alignment of individual actions, the <br>user can fine-tune the temporal alignment of actions relatively to each other. <br>A set of pre-defined functions can be used to control oscillatory behavior of <br>actions. Temporal constraints are resolved automatically by the MIMIC compiler. <br>We describe the grammar of MIMIC, give some hints on the implementation of the <br>MIMIC compiler, and show some examples of animation code together with <br>snapshots from the resulting animation.},
  BOOKTITLE = {Proceedings of the 12th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2004)},
  PAGES     = {71--78},
  JOURNAL   = {Journal of WSCG (Proc. WSCG)},
  VOLUME    = {12},
  ISSUE     = {1-3},
  ADDRESS   = {Plzen, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Fuchs, Thomas %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MIMIC - A Language for Specifying Facial Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2ADA-7 %F EDOC: 231964 %F OTHER: Local-ID: C125675300671F7B-8F80D17E6FBE0E93C1256E3E004405AE-Fuchs:Mimic %D 2004 %B The 12th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision %Z date of event: 2004-02-02 - 2004-02-06 %C Plzen, Czech Republic %X This paper introduces a versatile language for specifying facial<br>animations. The language MIMIC can be used together with any facial animation <br>system that employs animation parameters varying over time to control the <br>animation. In addition to the automatic alignment of individual actions, the <br>user can fine-tune the temporal alignment of actions relatively to each other. <br>A set of pre-defined functions can be used to control oscillatory behavior of <br>actions. Temporal constraints are resolved automatically by the MIMIC compiler. <br>We describe the grammar of MIMIC, give some hints on the implementation of the <br>MIMIC compiler, and show some examples of animation code together with <br>snapshots from the resulting animation. %B Proceedings of the 12th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision %P 71 - 78 %I UNION Agency %@ 80-903100-5-2 %J Journal of WSCG %V 12 %N 1-3 %I UNION Agency %@ false
Dmitriev, K. and Seidel, H.-P. 2004. Progressive Path Tracing with Lightweight Local Error Estimation. Vision, modeling, and visualization 2004 (VMV 2004), Akademische Verlagsgesellschaft Aka.
Abstract
Adaptive sampling techniques typically applied in path tracing are not<br>progressive. The reason is that they need all the samples used to compute<br>pixel color for error estimation. Thus progressive computation would need<br>to store all the samples for all the pixels, which is too expensive.<br>Absence of progressivity is a big disadvantage of adaptive path tracing<br>algorithms because a user may become aware of some unwanted effects on the<br>image only after quite significant time. We propose a new estimate of local<br>error in path tracing. The new technique happens to be lightweight in terms<br>of both memory and execution time and lends itself very well to<br>progressivity. Also, even though perceptual error metric is used, it<br>allows changes of any tone mapping parameters during the course of<br>computation. In this case none of the previous effort is lost, error<br>distribution is immediately updated and used for refining the solution.
Export
BibTeX
@inproceedings{Dmitriev-Seidel_VMV04,
  TITLE     = {Progressive Path Tracing with Lightweight Local Error Estimation},
  AUTHOR    = {Dmitriev, Kirill and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-058-0},
  LOCALID   = {Local-ID: C125675300671F7B-80DB4106BB068C02C1256F5E004741EF-dmitrie04ppt},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Adaptive sampling techniques typically applied in path tracing are not<br>progressive. The reason is that they need all the samples used to compute<br>pixel color for error estimation. Thus progressive computation would need<br>to store all the samples for all the pixels, which is too expensive.<br>Absence of progressivity is a big disadvantage of adaptive path tracing<br>algorithms because a user may become aware of some unwanted effects on the<br>image only after quite significant time. We propose a new estimate of local<br>error in path tracing. The new technique happens to be lightweight in terms<br>of both memory and execution time and lends itself very well to<br>progressivity. Also, even though perceptual error metric is used, it<br>allows changes of any tone mapping parameters during the course of<br>computation. In this case none of the previous effort is lost, error<br>distribution is immediately updated and used for refining the solution.},
  BOOKTITLE = {Vision, modeling, and visualization 2004 (VMV 2004)},
  EDITOR    = {Girod, Bernd and Magnor, Marcus A. and Seidel, Hans-Peter},
  PAGES     = {249--254},
  ADDRESS   = {Stanford, CA, USA},
}
Endnote
%0 Conference Proceedings %A Dmitriev, Kirill %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Progressive Path Tracing with Lightweight Local Error Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B05-C %F EDOC: 232047 %F OTHER: Local-ID: C125675300671F7B-80DB4106BB068C02C1256F5E004741EF-dmitrie04ppt %D 2004 %B 9th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2004-11-16 - 2004-11-18 %C Stanford, CA, USA %X Adaptive sampling techniques typically applied in path tracing are not<br>progressive. The reason is that they need all the samples used to compute<br>pixel color for error estimation. Thus progressive computation would need<br>to store all the samples for all the pixels, which is too expensive.<br>Absence of progressivity is a big disadvantage of adaptive path tracing<br>algorithms because a user may become aware of some unwanted effects on the<br>image only after quite significant time. We propose a new estimate of local<br>error in path tracing. The new technique happens to be lightweight in terms<br>of both memory and execution time and lends itself very well to<br>progressivity. Also, even thought perceptual error metric is used, it<br>allows changes of any tone mapping parameters during the course of<br>computation. In this case none of the previous effort is lost, error<br>distribution is immediately updated and used for refining the solution. %B Vision, modeling, and visualization 2004 %E Girod, Bernd; Magnor, Marcus A.; Seidel, Hans-Peter %P 249 - 254 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-058-0
Dmitriev, K., Havran, V., and Seidel, H.-P. 2004a. Faster ray tracing with SIMD shaft culling. Max-Planck-Institut für Informatik, Saarbrücken.
Export
BibTeX
@techreport{Dmitriev-Havran-Seidel_TR2004,
  TITLE       = {Faster ray tracing with {SIMD} shaft culling},
  AUTHOR      = {Dmitriev, Kirill and Havran, Vlastimil and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2004-4-006},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2004},
  DATE        = {2004-12},
  TYPE        = {Max-Planck-Institut f{\"u}r Informatik {\textless}Saarbr{\"u}cken{\textgreater}: Research Report},
  EDITOR      = {{Max-Planck-Institut f{\"u}r Informatik {\textless}Saarbr{\"u}cken{\textgreater}}},
}
Endnote
%0 Report %A Dmitriev, Kirill %A Havran, Vlastimil %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Faster ray tracing with SIMD shaft culling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-28BB-A %F EDOC: 237860 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2004 %P 13 p. %B Max-Planck-Institut f&#252;r Informatik <Saarbr&#252;cken>: Research Report
Dmitriev, K., Annen, T., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2004b. A CAVE System for Interactive Modeling of Global Illumination in Car Interior. Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST 2004), ACM.
Abstract
Global illumination dramatically improves realistic appearance<br>of rendered scenes, but usually it is neglected in VR systems<br>due to its high costs. In this work we present an efficient <br>global illumination solution specifically tailored for those CAVE<br>applications, which require an immediate response for dynamic light<br>changes and allow for free motion of the observer, but involve scenes<br>with static geometry. As an application example we choose<br>the car interior modeling under free driving conditions.<br>We illuminate the car using dynamically changing High Dynamic Range <br>(HDR) environment maps and use the Precomputed Radiance Transfer (PRT) <br>method for the global illumination computation. We<br>leverage the PRT method to handle scenes with non-trivial topology<br>represented by complex meshes. Also, we propose a hybrid of<br>PRT and final gathering approach for high-quality rendering<br>of objects with complex Bi-directional Reflectance Distribution<br>Function (BRDF). We use<br>this method for predictive rendering of the navigation LCD panel<br>based on its measured BRDF. Since the global illumination <br>computation leads to HDR images we propose a tone mapping<br>algorithm tailored specifically for the CAVE. We employ <br>head tracking to identify the observed screen region<br>and derive for it proper luminance adaptation conditions, <br>which are then used for tone mapping on all walls in the CAVE.<br>We distribute our global illumination and tone mapping computation<br>on all CPUs and GPUs available in the<br>CAVE, which enables us to achieve interactive performance<br>even for the costly final gathering approach.
Export
BibTeX
@inproceedings{Dmitriev-et-al_VRST04,
  TITLE     = {A {CAVE} System for Interactive Modeling of Global Illumination in Car Interior},
  AUTHOR    = {Dmitriev, Kirill and Annen, Thomas and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-907-5},
  DOI       = {10.1145/1077534.1077560},
  LOCALID   = {Local-ID: C125675300671F7B-9738E2CF6F79F214C1256F5E004819E6-dmitriev04acs},
  PUBLISHER = {ACM},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Global illumination dramatically improves realistic appearance<br>of rendered scenes, but usually it is neglected in VR systems<br>due to its high costs. In this work we present an efficient <br>global illumination solution specifically tailored for those CAVE<br>applications, which require an immediate response for dynamic light<br>changes and allow for free motion of the observer, but involve scenes<br>with static geometry. As an application example we choose<br>the car interior modeling under free driving conditions.<br>We illuminate the car using dynamically changing High Dynamic Range <br>(HDR) environment maps and use the Precomputed Radiance Transfer (PRT) <br>method for the global illumination computation. We<br>leverage the PRT method to handle scenes with non-trivial topology<br>represented by complex meshes. Also, we propose a hybrid of<br>PRT and final gathering approach for high-quality rendering<br>of objects with complex Bi-directional Reflectance Distribution<br>Function (BRDF). We use<br>this method for predictive rendering of the navigation LCD panel<br>based on its measured BRDF. Since the global illumination <br>computation leads to HDR images we propose a tone mapping<br>algorithm tailored specifically for the CAVE. We employ <br>head tracking to identify the observed screen region<br>and derive for it proper luminance adaptation conditions, <br>which are then used for tone mapping on all walls in the CAVE.<br>We distribute our global illumination and tone mapping computation<br>on all CPUs and GPUs available in the<br>CAVE, which enables us to achieve interactive performance<br>even for the costly final gathering approach.},
  BOOKTITLE = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST 2004)},
  EDITOR    = {Lau, Rynson and Baciu, George},
  PAGES     = {137--145},
  ADDRESS   = {Hong Kong},
}
Endnote
%0 Conference Proceedings %A Dmitriev, Kirill %A Annen, Thomas %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A CAVE System for Interactive Modeling of Global Illumination in Car Interior : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29FF-A %F EDOC: 231986 %F OTHER: Local-ID: C125675300671F7B-9738E2CF6F79F214C1256F5E004819E6-dmitriev04acs %R 10.1145/1077534.1077560 %D 2004 %B ACM Symposium on Virtual Reality Software and Technology 2004 %Z date of event: 2004-11-10 - 2004-11-12 %C Hong Kong %X Global illumination dramatically improves realistic appearance<br>of rendered scenes, but usually it is neglected in VR systems<br>due to its high costs. In this work we present an efficient <br>global illumination solution specifically tailored for those CAVE<br>applications, which require an immediate response for dynamic light<br>changes and allow for free motion of the observer, but involve scenes<br>with static geometry. As an application example we choose<br>the car interior modeling under free driving conditions.<br>We illuminate the car using dynamically changing High Dynamic Range <br>(HDR) environment maps and use the Precomputed Radiance Transfer (PRT) <br>method for the global illumination computation. We<br>leverage the PRT method to handle scenes with non-trivial topology<br>represented by complex meshes. Also, we propose a hybrid of<br>PRT and final gathering approach for high-quality rendering<br>of objects with complex Bi-directional Reflectance Distribution<br>Function (BRDF). We use<br>this method for predictive rendering of the navigation LCD panel<br>based on its measured BRDF. 
Since the global illumination <br>computation leads to HDR images we propose a tone mapping<br>algorithm tailored specifically for the CAVE. We employ <br>head tracking to identify the observed screen region<br>and derive for it proper luminance adaptation conditions, <br>which are then used for tone mapping on all walls in the CAVE.<br>We distribute our global illumination and tone mapping computation<br>on all CPUs and GPUs available in the<br>CAVE, which enables us to achieve interactive performance<br>even for the costly final gathering approach. %B Proceedings of the ACM Symposium on Virtual Reality Software and Technology %E Lau, Rynson; Baciu, George %P 137 - 145 %I ACM %@ 978-1-58113-907-5
Deussen, O., Müller, H., Saupe, D., and Seidel, H.-P. 2004. Graphische Datenverarbeitung. Informatik Spektrum27, 6.
Export
BibTeX
@article{DBLP:journals/insk/DeussenMSS04,
  TITLE     = {{Graphische Datenverarbeitung}},
  AUTHOR    = {Deussen, Oliver and M{\"u}ller, Heinrich and Saupe, Dietmar and Seidel, Hans-Peter},
  LANGUAGE  = {deu},
  ISSN      = {0170-6012},
  DOI       = {10.1007/S00287-004-0434-5},
  PUBLISHER = {Springer-Verlag},
  ADDRESS   = {Berlin},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {Informatik Spektrum},
  VOLUME    = {27},
  NUMBER    = {6},
  PAGES     = {495},
}
Endnote
%0 Journal Article %A Deussen, Oliver %A M&#252;ller, Heinrich %A Saupe, Dietmar %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Graphische Datenverarbeitung : %G deu %U http://hdl.handle.net/21.11116/0000-000F-28E2-5 %R 10.1007/S00287-004-0434-5 %D 2004 %J Informatik Spektrum %V 27 %N 6 %& 495 %P 495 %I Springer-Verlag %C Berlin %@ false %U https://rdcu.be/dETqX
De Aguiar, E., Theobalt, C., Magnor, M., Theisel, H., and Seidel, H.-P. 2004. M3: Marker-free Model Reconstruction and Motion Tracking from 3D Voxel Data. Proceedings of the 12th Pacific Conference on Computer Graphics and Applications (PG 2004), IEEE.
Export
BibTeX
@inproceedings{Theobalt-et-al_PG04,
  TITLE     = {M3: Marker-free Model Reconstruction and Motion Tracking from {3D} Voxel Data},
  AUTHOR    = {de Aguiar, Edilson and Theobalt, Christian and Magnor, Marcus and Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2234-3},
  DOI       = {10.1109/PCCGA.2004.1348340},
  LOCALID   = {Local-ID: C1256BDE005F57A8-FF167E72E19075E8C1256F47003AC1C2-Aguiar2004:MFM},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings of the 12th Pacific Conference on Computer Graphics and Applications (PG 2004)},
  EDITOR    = {Cohen-Or, Daniel and Ko, Hyeong-Seok and Terzopoulos, Demetri and Warren, Joe},
  PAGES     = {101--110},
  ADDRESS   = {Seoul, Korea},
}
Endnote
%0 Conference Proceedings %A de Aguiar, Edilson %A Theobalt, Christian %A Magnor, Marcus %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T M3: Marker-free Model Reconstruction and Motion Tracking from 3D Voxel Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BAC-7 %F EDOC: 241614 %F OTHER: Local-ID: C1256BDE005F57A8-FF167E72E19075E8C1256F47003AC1C2-Aguiar2004:MFM %R 10.1109/PCCGA.2004.1348340 %D 2004 %B 12th Pacific Conference on Computer Graphics and Applications %Z date of event: 2004-10-06 - 2004-10-08 %C Seoul, Korea %B Proceedings of the 12th Pacific Conference on Computer Graphics and Applications %E Cohen-Or, Daniel; Ko, Hyeong-Seok; Terzopoulos, Demetri; Warren, Joe %P 101 - 110 %I IEEE %@ 0-7395-2234-3
Choi, S.W. and Seidel, H.-P. 2004. Linear One-sided Stability of MAT for Weakly Injective 3D Domain. Computer Aided Design36, 2.
Export
BibTeX
@article{DBLP:journals/cad/ChoiS04,
  TITLE     = {Linear One-sided Stability of {MAT} for Weakly Injective {3D} Domain},
  AUTHOR    = {Choi, Sung Woo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0010-4485},
  DOI       = {10.1016/S0010-4485(03)00055-1},
  PUBLISHER = {Elsevier},
  ADDRESS   = {Oxford [etc.]},
  YEAR      = {2004},
  DATE      = {2004},
  JOURNAL   = {Computer Aided Design},
  VOLUME    = {36},
  NUMBER    = {2},
  PAGES     = {95--109},
}
Endnote
%0 Journal Article %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Linear One-sided Stability of MAT for Weakly Injective 3D Domain : %G eng %U http://hdl.handle.net/21.11116/0000-000F-2941-A %R 10.1016/S0010-4485(03)00055-1 %D 2004 %J Computer Aided Design %V 36 %N 2 %& 95 %P 95 - 109 %I Elsevier %C Oxford [etc.] %@ false
Blanz, V., Scherbaum, K., Vetter, T., and Seidel, H.-P. 2004a. Exchanging Faces in Images. Computer Graphics Forum (Proc. EUROGRAPHICS 2004), Blackwell.
Export
BibTeX
@inproceedings{Blanz-et-al_EUROGRAPHICS04,
  TITLE     = {Exchanging Faces in Images},
  AUTHOR    = {Blanz, Volker and Scherbaum, Kristina and Vetter, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2004.00799.x},
  LOCALID   = {Local-ID: C125675300671F7B-CEA089FA1CCE94B7C1256F9D003AF9EB-BlaScherVetSei04},
  PUBLISHER = {Blackwell},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {The European Association for Computer Graphics 25th Annual Conference (EUROGRAPHICS 2004)},
  EDITOR    = {Cani, Marie-Paule and Slater, Mel},
  PAGES     = {669--676},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {23},
  ISSUE     = {6},
  ADDRESS   = {Grenoble, France},
}
Endnote
%0 Conference Proceedings %A Blanz, Volker %A Scherbaum, Kristina %A Vetter, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Exchanging Faces in Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A91-9 %F EDOC: 232069 %F OTHER: Local-ID: C125675300671F7B-CEA089FA1CCE94B7C1256F9D003AF9EB-BlaScherVetSei04 %R 10.1111/j.1467-8659.2004.00799.x %D 2004 %B EUROGRAPHICS 2004 %Z date of event: 2004-08-30 - %C Grenoble, France %B The European Association for Computer Graphics 25th Annual Conference %E Cani, Marie-Paule; Slater, Mel %P 669 - 676 %I Blackwell %J Computer Graphics Forum %V 23 %N 6 %I Blackwell-Wiley %@ false
Blanz, V., Mehl, A., Vetter, T., and Seidel, H.-P. 2004b. A Statistical Method for Robust 3D Surface Reconstruction from Sparse Data. Proceedings of the 2nd International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT 2004), IEEE.
Export
BibTeX
@inproceedings{Blanz-et-al_3DPVT04,
  TITLE     = {A Statistical Method for Robust {3D} Surface Reconstruction from Sparse Data},
  AUTHOR    = {Blanz, Volker and Mehl, Albert and Vetter, Thomas and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2223-8},
  DOI       = {10.1109/TDPVT.2004.1335212},
  LOCALID   = {Local-ID: C125675300671F7B-B5CDC2F4F6E701E6C1256F9D003C0AEC-BlaMeVetSei04},
  PUBLISHER = {IEEE},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings of the 2nd International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT 2004)},
  EDITOR    = {Aloimonos, Yiannis and Taubin, Gabriel},
  PAGES     = {293--300},
  ADDRESS   = {Thessaloniki, Greece},
}
Endnote
%0 Conference Proceedings %A Blanz, Volker %A Mehl, Albert %A Vetter, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Statistical Method for Robust 3D Surface Reconstruction from Sparse Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29B2-3 %F EDOC: 231310 %F OTHER: Local-ID: C125675300671F7B-B5CDC2F4F6E701E6C1256F9D003C0AEC-BlaMeVetSei04 %R 10.1109/TDPVT.2004.1335212 %D 2004 %B 2nd International Symposium on 3D Data Processing, Visualization, and Transmission %Z date of event: 2004-09-06 - 2004-09-09 %C Thessaloniki, Greece %B Proceedings of the 2nd International Symposium on 3D Data Processing, Visualization, and Transmission %E Aloimonos, Yiannis; Taubin, Gabriel %P 293 - 300 %I IEEE %@ 0-7695-2
Annen, T., Kautz, J., Durand, F., and Seidel, H.-P. 2004a. Spherical Harmonic Gradients. ACM SIGGRAPH 2004 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/AnnenKDS04,
  TITLE     = {Spherical Harmonic Gradients},
  AUTHOR    = {Annen, Thomas and Kautz, Jan and Durand, Fr{\'e}do and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-896-2},
  DOI       = {10.1145/1186223.1186361},
  PUBLISHER = {ACM},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {ACM SIGGRAPH 2004 Sketches},
  EDITOR    = {Barzel, Ronen},
  PAGES     = {110},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Kautz, Jan %A Durand, Fr&#233;do %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Spherical Harmonic Gradients : %G eng %U http://hdl.handle.net/21.11116/0000-000F-1D8F-1 %R 10.1145/1186223.1186361 %D 2004 %B 2004 International Conference on Computer Graphics and Interactive Techniques %Z date of event: 2004-08-08 - 2004-08-12 %C Los Angeles, CA, USA %B ACM SIGGRAPH 2004 Sketches %E Barzel, Ronen %P 110 %I ACM %@ 978-1-58113-896-2
Annen, T., Kautz, J., Durand, F., and Seidel, H.-P. 2004b. Spherical Harmonic Gradients for Mid-Range Illumination. Rendering Techniques 2004 (EGSR 2004), The Eurographics Association.
Abstract
Spherical harmonics are often used for compact description of<br>incident radiance in low-frequency but distant lighting <br>environments. For interaction with nearby emitters, computing<br>the incident radiance at the center of an object only is<br>not sufficient. Previous techniques then require expensive <br>sampling of the incident radiance field at many points <br>distributed over the object. Our technique alleviates this<br>costly requirement using a first-order Taylor expansion of the<br>spherical-harmonic lighting coefficients around a point. We <br>propose an interpolation scheme based on these gradients <br>requiring far fewer samples (one is often sufficient). We show <br>that the gradient of the incident-radiance spherical harmonics <br>can be computed for little additional cost compared to the <br>coefficients alone. We introduce a semi-analytical formula to <br>calculate this gradient at run-time and describe how a simple vertex shader can <br>interpolate the shading. The interpolated <br>representation of the incident radiance can be used with any <br>low-frequency light-transfer technique.
Export
BibTeX
@inproceedings{Annen-et-al_EGSR04,
  TITLE     = {Spherical Harmonic Gradients for Mid-Range Illumination},
  AUTHOR    = {Annen, Thomas and Kautz, Jan and Durand, Fr{\'e}do and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-12-6},
  DOI       = {10.2312/EGWR/EGSR04/331-336},
  LOCALID   = {Local-ID: C125675300671F7B-8DECFDE55BFA3E80C1256EBE0030D372-Annen:EGSR:2004},
  PUBLISHER = {The Eurographics Association},
  YEAR      = {2004},
  DATE      = {2004},
  ABSTRACT  = {Spherical harmonics are often used for compact description of<br>incident radiance in low-frequency but distant lighting <br>environments. For interaction with nearby emitters, computing<br>the incident radiance at the center of an object only is<br>not sufficient. Previous techniques then require expensive <br>sampling of the incident radiance field at many points <br>distributed over the object. Our technique alleviates this<br>costly requirement using a first-order Taylor expansion of the<br>spherical-harmonic lighting coefficients around a point. We <br>propose an interpolation scheme based on these gradients <br>requiring far fewer samples (one is often sufficient). We show <br>that the gradient of the incident-radiance spherical harmonics <br>can be computed for little additional cost compared to the <br>coefficients alone. We introduce a semi-analytical formula to <br>calculate this gradient at run-time and describe how a simple vertex shader can <br>interpolate the shading. The interpolated <br>representation of the incident radiance can be used with any <br>low-frequency light-transfer technique.},
  BOOKTITLE = {Rendering Techniques 2004 (EGSR 2004)},
  EDITOR    = {Keller, Alexander and Jensen, Henrik},
  PAGES     = {331--336},
  ADDRESS   = {Norrk{\"o}ping, Sweden},
}
Endnote
%0 Conference Proceedings %A Annen, Thomas %A Kautz, Jan %A Durand, Fr&#233;do %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Spherical Harmonic Gradients for Mid-Range Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B42-2 %F EDOC: 231868 %F OTHER: Local-ID: C125675300671F7B-8DECFDE55BFA3E80C1256EBE0030D372-Annen:EGSR:2004 %R 10.2312/EGWR/EGSR04/331-336 %D 2004 %B 15th Eurographics Symposium on Rendering Techniques %Z date of event: 2004-06-21 - 2004-06-23 %C Norrk&#246;ping, Sweden %X Spherical harmonics are often used for compact description of<br>incident radiance in low-frequency but distant lighting <br>environments. For interaction with nearby emitters, computing<br>the incident radiance at the center of an object only is<br>not sufficient. Previous techniques then require expensive <br>sampling of the incident radiance field at many points <br>distributed over the object. Our technique alleviates this<br>costly requirement using a first-order Taylor expansion of the<br>spherical-harmonic lighting coefficients around a point. We <br>propose an interpolation scheme based on these gradients <br>requiring far fewer samples (one is often sufficient). We show <br>that the gradient of the incident-radiance spherical harmonics <br>can be computed for little additional cost compared to the <br>coefficients alone. We introduce a semi-analytical formula to <br>calculate this gradient at run-time and describe how a simple vertex shader can <br>interpolate the shading. The interpolated <br>representation of the incident radiance can be used with any <br>low-frequency light-transfer technique. %B Rendering Techniques 2004 %E Keller, Alexander; Jensen, Henrik %P 331 - 336 %I The Eurographics Association %@ 3-905673-12-6
Ahn, M., Lee, S., and Seidel, H.-P. 2004. Connectivity Transformation for Mesh Metamorphosis. Proceedings of the 2004 Eurographics Symposium on Geometry Processing (SGP 2004), The Eurographics Association.
Export
BibTeX
@inproceedings{Ahn-et-al_SGP04,
  TITLE     = {Connectivity Transformation for Mesh Metamorphosis},
  AUTHOR    = {Ahn, Minsu and Lee, Seungyong and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-13-4},
  ISSN      = {1727-8384},
  DOI       = {10.2312/SGP/SGP04/077-084},
  LOCALID   = {Local-ID: C125675300671F7B-0D08DEDF77433FE1C1256FB0002DBBF6-Lee2004a},
  PUBLISHER = {The Eurographics Association},
  YEAR      = {2004},
  DATE      = {2004},
  BOOKTITLE = {Proceedings of the 2004 Eurographics Symposium on Geometry Processing (SGP 2004)},
  EDITOR    = {Scopigno, Roberto and Zorin, Denis and Fellner, Dieter and Spencer, Stephen},
  PAGES     = {77--84},
  SERIES    = {Eurographics / ACM SIGGRAPH Symposium Proceedings},
  ADDRESS   = {Nice, France},
}
Endnote
%0 Conference Proceedings %A Ahn, Minsu %A Lee, Seungyong %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Connectivity Transformation for Mesh Metamorphosis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A64-D %F EDOC: 231991 %F OTHER: Local-ID: C125675300671F7B-0D08DEDF77433FE1C1256FB0002DBBF6-Lee2004a %R 10.2312/SGP/SGP04/077-084 %D 2004 %B 2004 Eurographics Symposium on Geometry Processing %Z date of event: 2004-07-08 - 2004-07-10 %C Nice, France %B Proceedings of the 2004 Eurographics Symposium on Geometry Processing %E Scopigno, Roberto; Zorin, Denis; Fellner, Dieter; Spencer, Stephen %P 77 - 84 %I The Eurographics Association %@ 3-905673-13-4/1727-8384 %B Eurographics / ACM SIGGRAPH Symposium Proceedings
2003
Zayer, R., Rössl, C., and Seidel, H.-P. 2003a. Convex Boundary Angle Based Flattening. Vision, Modeling and Visualization 2003 (VMV 2003), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{Zayer-et-al_VMV03,
  TITLE     = {Convex Boundary Angle Based Flattening},
  AUTHOR    = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-048-3},
  LOCALID   = {Local-ID: C125675300671F7B-4ED4FCAA4F2E8312C1256D82004C8BDF-Zayer2003},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Vision, Modeling and Visualization 2003 (VMV 2003)},
  EDITOR    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter and Steinbach, Eckehard and Westermann, R{\"u}diger},
  PAGES     = {281--288},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Convex Boundary Angle Based Flattening : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CA0-8 %F EDOC: 201953 %F OTHER: Local-ID: C125675300671F7B-4ED4FCAA4F2E8312C1256D82004C8BDF-Zayer2003 %D 2003 %B International Fall Workshop on Vision, Modeling and Visualization 2003 %Z date of event: 2003-11-19 - 2003-11-21 %C Munich, Germany %B Vision, Modeling and Visualization 2003 %E Ertl, Thomas; Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter; Steinbach, Eckehard; Westermann, R&#252;diger %P 281 - 288 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-048-3
Zayer, R., Rössl, C., and Seidel, H.-P. 2003b. Convex boundary angle based flattening. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Angle Based Flattening is a robust parameterization method that finds a quasi-conformal mapping by solving a non-linear optimization problem. We take advantage of a characterization of convex planar drawings of triconnected graphs to introduce new boundary constraints. This prevents boundary intersections and avoids post-processing of the parameterized mesh. We present a simple transformation to effectively relax the constrained minimization problem, which improves the convergence of the optimization method. As a natural extension, we discuss the construction of Delaunay flat meshes. This may further enhance the quality of the resulting parameterization.
Export
BibTeX
@techreport{ZayerRoesslSeidel2003,
  TITLE       = {Convex boundary angle based flattening},
  AUTHOR      = {Zayer, Rhaleb and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2003-4-003},
  NUMBER      = {MPI-I-2003-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2003},
  DATE        = {2003},
  ABSTRACT    = {Angle Based Flattening is a robust parameterization method that finds a quasi-conformal mapping by solving a non-linear optimization problem. We take advantage of a characterization of convex planar drawings of triconnected graphs to introduce new boundary constraints. This prevents boundary intersections and avoids post-processing of the parameterized mesh. We present a simple transformation to effectively relax the constrained minimization problem, which improves the convergence of the optimization method. As a natural extension, we discuss the construction of Delaunay flat meshes. This may further enhance the quality of the resulting parameterization.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Zayer, Rhaleb %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Convex boundary angle based flattening : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6AED-3 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2003-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2003 %P 16 p. %X Angle Based Flattening is a robust parameterization method that finds a quasi-conformal mapping by solving a non-linear optimization problem. We take advantage of a characterization of convex planar drawings of triconnected graphs to introduce new boundary constraints. This prevents boundary intersections and avoids post-processing of the parameterized mesh. We present a simple transformation to effectively relax the constrained minimization problem, which improves the convergence of the optimization method. As a natural extension, we discuss the construction of Delaunay flat meshes. This may further enhance the quality of the resulting parameterization. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2003. Free-form Skeleton-driven Mesh Deformations. Proceedings of the 8th ACM Symposium on Solid Modeling and Applications (SM 2003), ACM.
Export
BibTeX
@inproceedings{Yoshizawa-et-al_SM03,
  TITLE     = {Free-form Skeleton-driven Mesh Deformations},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-58113-706-0},
  DOI       = {10.1145/781606.781643},
  LOCALID   = {Local-ID: C125675300671F7B-7DAD696946262B4AC1256CF3006CDDA0-sm03ybs},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Proceedings of the 8th ACM Symposium on Solid Modeling and Applications (SM 2003)},
  EDITOR    = {Elber, Gershon and Shapiro, Vadim},
  PAGES     = {247--253},
  ADDRESS   = {Seattle, USA},
}
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Free-form Skeleton-driven Mesh Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D08-5 %F EDOC: 201989 %F OTHER: Local-ID: C125675300671F7B-7DAD696946262B4AC1256CF3006CDDA0-sm03ybs %R 10.1145/781606.781643 %D 2003 %B 8th ACM Symposium on Solid Modeling and Applications %Z date of event: 2003-06-16 - 2003-06-20 %C Seattle, USA %B Proceedings of the 8th ACM Symposium on Solid Modeling and Applications %E Elber, Gershon; Shapiro, Vadim %P 247 - 253 %I ACM %@ 1-58113-706-0
Yamauchi, H., Haber, J., and Seidel, H.-P. 2003. Image Restoration using Multiresolution Texture Synthesis and Image Inpainting. Proceedings of the Computer Graphics International (CGI 2003), IEEE Computer Society.
Abstract
We present a new method for the restoration of digitized photographs.<br>Restoration in this context refers to removal of image defects such as<br>scratches and blotches as well as to removal of disturbing objects as, for<br>instance, subtitles, logos, wires, and microphones. <br><br>Our method combines techniques from texture synthesis and image<br>inpainting, bridging the gap between these two approaches that have<br>recently attracted strong research interest. Combining image inpainting<br>and texture synthesis in a multiresolution approach gives us the best of<br>both worlds and enables us to overcome the limitations of each of those<br>individual approaches.<br><br>The restored images obtained with our method look plausible in general and<br>surprisingly good in some cases. This is demonstrated for a variety of<br>input images that exhibit different kinds of defects.
Export
BibTeX
@inproceedings{Yamauchi-et-al_CGI03,
  TITLE     = {Image Restoration using Multiresolution Texture Synthesis and Image Inpainting},
  AUTHOR    = {Yamauchi, Hitoshi and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1946-6},
  DOI       = {10.1109/CGI.2003.1214456},
  LOCALID   = {Local-ID: C125675300671F7B-E731CE058F421233C1256CC50036596B-Yamauchi:IRMTSII},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {We present a new method for the restoration of digitized photographs.<br>Restoration in this context refers to removal of image defects such as<br>scratches and blotches as well as to removal of disturbing objects as, for<br>instance, subtitles, logos, wires, and microphones. <br><br>Our method combines techniques from texture synthesis and image<br>inpainting, bridging the gap between these two approaches that have<br>recently attracted strong research interest. Combining image inpainting<br>and texture synthesis in a multiresolution approach gives us the best of<br>both worlds and enables us to overcome the limitations of each of those<br>individual approaches.<br><br>The restored images obtained with our method look plausible in general and<br>surprisingly good in some cases. This is demonstrated for a variety of<br>input images that exhibit different kinds of defects.},
  BOOKTITLE = {Proceedings of the Computer Graphics International (CGI 2003)},
  PAGES     = {120--125},
  ADDRESS   = {Tokyo, Japan},
}
Endnote
%0 Conference Proceedings %A Yamauchi, Hitoshi %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image Restoration using Multiresolution Texture Synthesis and Image Inpainting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D30-B %F EDOC: 201946 %F OTHER: Local-ID: C125675300671F7B-E731CE058F421233C1256CC50036596B-Yamauchi:IRMTSII %R 10.1109/CGI.2003.1214456 %D 2003 %B Computer Graphics International 2003 %Z date of event: 2003-07-09 - 2003-07-11 %C Tokyo, Japan %X We present a new method for the restoration of digitized photographs.<br>Restoration in this context refers to removal of image defects such as<br>scratches and blotches as well as to removal of disturbing objects as, for<br>instance, subtitles, logos, wires, and microphones. <br><br>Our method combines techniques from texture synthesis and image<br>inpainting, bridging the gap between these two approaches that have<br>recently attracted strong research interest. Combining image inpainting<br>and texture synthesis in a multiresolution approach gives us the best of<br>both worlds and enables us to overcome the limitations of each of those<br>individual approaches.<br><br>The restored images obtained with our method look plausible in general and<br>surprisingly good in some cases. This is demonstrated for a variety of<br>input images that exhibit different kinds of defects. %B Proceedings of the Computer Graphics International %P 120 - 125 %I IEEE Computer Society %@ 0-7695-1946-6
Vorsatz, J., Rössl, C., and Seidel, H.-P. 2003a. Dynamic Remeshing and Applications. Proceedings of the 8th ACM Symposium on Solid Modeling and Applications (SM 2003), ACM.
Abstract
Triangle meshes are a flexible and generally accepted boundary<br> representation for complex geometric shapes. In addition to their<br> geometric qualities and topological simplicity, \emph{intrinsic}<br> qualities such as the shape of the triangles, their distribution on<br> the surface and the connectivity are essential for many algorithms<br> working on them. In this paper we present a flexible and efficient<br> remeshing framework that improves these intrinsic properties while<br> keeping the mesh geometrically close to the original surface. We<br> use a particle system approach and combine it with an incremental<br> connectivity optimization process to trim the mesh towards the<br> requirements imposed by the user. The particle system uniformly<br> distributes the vertices on the mesh, whereas the connectivity<br> optimization is done by means of \emph{Dynamic Connectivity Meshes},<br> a combination of local topological operators that lead to a fairly<br> regular connectivity. A dynamic skeleton ensures that our approach<br> is able to preserve surface features, which are particularly<br> important for the visual quality of the mesh. None of the<br> algorithms requires a global parameterization or patch layouting in<br> a preprocessing step but uses local parameterizations only. We also<br> show how this general framework can be put into practice and sketch<br> several application scenarios. In particular we will show how the<br> users can adapt the involved algorithms in a way that the resulting<br> remesh meets their personal requirements.
Export
BibTeX
@inproceedings{Vorsatz-et-al_SM03,
  TITLE     = {Dynamic Remeshing and Applications},
  AUTHOR    = {Vorsatz, Jens and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-706-4},
  DOI       = {10.1145/781606.781633},
  LOCALID   = {Local-ID: C125675300671F7B-690DFDA53D7BD4D8C1256CD4004EB2F0-Vorsatz:2003:DRA},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Triangle meshes are a flexible and generally accepted boundary<br> representation for complex geometric shapes. In addition to their<br> geometric qualities and topological simplicity, \emph{intrinsic}<br> qualities such as the shape of the triangles, their distribution on<br> the surface and the connectivity are essential for many algorithms<br> working on them. In this paper we present a flexible and efficient<br> remeshing framework that improves these intrinsic properties while<br> keeping the mesh geometrically close to the original surface. We<br> use a particle system approach and combine it with an incremental<br> connectivity optimization process to trim the mesh towards the<br> requirements imposed by the user. The particle system uniformly<br> distributes the vertices on the mesh, whereas the connectivity<br> optimization is done by means of \emph{Dynamic Connectivity Meshes},<br> a combination of local topological operators that lead to a fairly<br> regular connectivity. A dynamic skeleton ensures that our approach<br> is able to preserve surface features, which are particularly<br> important for the visual quality of the mesh. None of the<br> algorithms requires a global parameterization or patch layouting in<br> a preprocessing step but uses local parameterizations only. We also<br> show how this general framework can be put into practice and sketch<br> several application scenarios. In particular we will show how the<br> users can adapt the involved algorithms in a way that the resulting<br> remesh meets their personal requirements.},
  BOOKTITLE = {Proceedings of the 8th ACM Symposium on Solid Modeling and Applications (SM 2003)},
  EDITOR    = {Elber, Gershon and Shapiro, Vadim},
  PAGES     = {167--175},
  ADDRESS   = {Seattle, WA, USA},
}
Endnote
%0 Conference Proceedings %A Vorsatz, Jens %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Remeshing and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CC6-1 %F EDOC: 201843 %F OTHER: Local-ID: C125675300671F7B-690DFDA53D7BD4D8C1256CD4004EB2F0-Vorsatz:2003:DRA %R 10.1145/781606.781633 %D 2003 %B 8th ACM Symposium on Solid Modeling and Applications %Z date of event: 2003-06-16 - 2003-06-20 %C Seattle, WA, USA %X Triangle meshes are a flexible and generally accepted boundary<br> representation for complex geometric shapes. In addition to their<br> geometric qualities and topological simplicity, \emph{intrinsic}<br> qualities such as the shape of the triangles, their distribution on<br> the surface and the connectivity are essential for many algorithms<br> working on them. In this paper we present a flexible and efficient<br> remeshing framework that improves these intrinsic properties while<br> keeping the mesh geometrically close to the original surface. We<br> use a particle system approach and combine it with an incremental<br> connectivity optimization process to trim the mesh towards the<br> requirements imposed by the user. The particle system uniformly<br> distributes the vertices on the mesh, whereas the connectivity<br> optimization is done by means of \emph{Dynamic Connectivity Meshes},<br> a combination of local topological operators that lead to a fairly<br> regular connectivity. A dynamic skeleton ensures that our approach<br> is able to preserve surface features, which are particularly<br> important for the visual quality of the mesh. None of the<br> algorithms requires a global parameterization or patch layouting in<br> a preprocessing step but uses local parameterizations only. 
We also<br> show how this general framework can be put into practice and sketch<br> several application scenarios. In particular we will show how the<br> users can adapt the involved algorithms in a way that the resulting<br> remesh meets their personal requirements. %B Proceedings of the 8th ACM Symposium on Solid Modeling and Applications %E Elber, Gershon; Shapiro, Vadim %P 167 - 175 %I ACM %@ 978-1-58113-706-4
Vorsatz, J., Rössl, C., and Seidel, H.-P. 2003b. Dynamic Remeshing and Applications. Journal of Computing and Information Science in Engineering 3, 4.
Export
BibTeX
@article{Vorsatz-et-al_JCISE03,
  TITLE     = {Dynamic Remeshing and Applications},
  AUTHOR    = {Vorsatz, Jens and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1530-9827},
  DOI       = {10.1115/1.1631021},
  LOCALID   = {Local-ID: C125675300671F7B-DD9372A15E1E9EA8C1256DFE007378B8-vrs:dra:2003},
  PUBLISHER = {American Society of Mechanical Engineers},
  ADDRESS   = {New York, NY},
  YEAR      = {2003},
  DATE      = {2003},
  JOURNAL   = {Journal of Computing and Information Science in Engineering},
  VOLUME    = {3},
  NUMBER    = {4},
  PAGES     = {338--344},
}
Endnote
%0 Journal Article %A Vorsatz, Jens %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Remeshing and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CC9-C %F EDOC: 202018 %F OTHER: Local-ID: C125675300671F7B-DD9372A15E1E9EA8C1256DFE007378B8-vrs:dra:2003 %R 10.1115/1.1631021 %D 2003 %* Review method: peer-reviewed %J Journal of Computing and Information Science in Engineering %V 3 %N 4 %& 338 %P 338 - 344 %I American Society of Mechanical Engineers %C New York, NY %@ false
Vorsatz, J. and Seidel, H.-P. 2003. A Framework for Dynamic Connectivity Meshes. OpenSG Symposium 2003, ACM.
Abstract
Implementing algorithms that are based on dynamic triangle meshes often requires updating internal data-structures as soon as the connectivity of the mesh changes. The design of a class hierarchy that is able to deal with such changes is particularly challenging if the system reaches a certain complexity. The paper proposes a software design that enables the users to efficiently implement algorithms that can handle these dynamic changes while still maintaining a certain encapsulation of the single components. Our design is based on a callback mechanism. A client can register at some {\tt Info}-object and gets informed whenever a change of the connectivity occurs. This way the client is able to keep internal data up-to-date. Our framework enables us to write small client classes that cover just a small dedicated aspect of necessary updates related to the changing connectivity. These small components can be combined to more complex modules and can often easily be reused. Moreover, we do not have to store related 'dynamic data' in one central place, e.g. the mesh, which could lead to a significant memory overhead if an application uses some modules just for a short time. We have used and tested this class design extensively for implementing 'Dynamic Connectivity Meshes and Applications~\cite{Vorsatz:2003:DRA}'. Additionally, as a feasibility study, we have implemented and integrated our concept in the \OM-framework.
Export
BibTeX
@inproceedings{Vorsatz:2003:FDC,
  TITLE     = {A Framework for Dynamic Connectivity Meshes},
  AUTHOR    = {Vorsatz, Jens and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-7DB11C9D8C3B02F5C1256CED002FE17A-Vorsatz:2003:FDC},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Implementing algorithms that are based on dynamic triangle meshes often requires updating internal data-structures as soon as the connectivity of the mesh changes. The design of a class hierarchy that is able to deal with such changes is particularly challenging if the system reaches a certain complexity. The paper proposes a software design that enables the users to efficiently implement algorithms that can handle these dynamic changes while still maintaining a certain encapsulation of the single components. Our design is based on a callback mechanism. A client can register at some {\tt Info}-object and gets informed whenever a change of the connectivity occurs. This way the client is able to keep internal data up-to-date. Our framework enables us to write small client classes that cover just a small dedicated aspect of necessary updates related to the changing connectivity. These small components can be combined to more complex modules and can often easily be reused. Moreover, we do not have to store related 'dynamic data' in one central place, e.g. the mesh, which could lead to a significant memory overhead if an application uses some modules just for a short time. We have used and tested this class design extensively for implementing 'Dynamic Connectivity Meshes and Applications~\cite{Vorsatz:2003:DRA}'. Additionally, as a feasibility study, we have implemented and integrated our concept in the \OM-framework.},
  BOOKTITLE = {OpenSG Symposium 2003},
  EDITOR    = {Reiners, D.},
  PAGES     = {49--55},
  ADDRESS   = {Darmstadt, Germany},
}
Endnote
%0 Conference Proceedings %A Vorsatz, Jens %A Seidel, Hans-Peter %E Reiners, D. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society %T A Framework for Dynamic Connectivity Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C01-C %F EDOC: 201795 %F OTHER: Local-ID: C125675300671F7B-7DB11C9D8C3B02F5C1256CED002FE17A-Vorsatz:2003:FDC %D 2003 %B OpenSG Symposium 2003 %Z date of event: 2003-03-31 - %C Darmstadt, Germany %X Implementing algorithms that are based on dynamic triangle meshes often requires updating internal data-structures as soon as the connectivity of the mesh changes. The design of a class hierarchy that is able to deal with such changes is particularly challenging if the system reaches a certain complexity. The paper proposes a software design that enables the users to efficiently implement algorithms that can handle these dynamic changes while still maintaining a certain encapsulation of the single components. Our design is based on a callback mechanism. A client can register at some {\tt Info}-object and gets informed whenever a change of the connectivity occurs. This way the client is able to keep internal data up-to-date. Our framework enables us to write small client classes that cover just a small dedicated aspect of necessary updates related to the changing connectivity. These small components can be combined to more complex modules and can often easily be reused. Moreover, we do not have to store related 'dynamic data' in one central place, e.g. the mesh, which could lead to a significant memory overhead if an application uses some modules just for a short time. We have used and tested this class design extensively for implementing 'Dynamic Connectivity Meshes and Applications~\cite{Vorsatz:2003:DRA}'. Additionally, as a feasibility study, we have implemented and integrated our concept in the \OM-framework. 
%B OpenSG Symposium 2003 %P 49 - 55 %I ACM
Theobalt, C., Carranza, J., Magnor, M., and Seidel, H.-P. 2003a. Enhancing Silhouette-based Human Motion Capture with 3D Motion Fields. Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003), IEEE.
Abstract
High-quality non-intrusive human motion capture is necessary for acquistion<br>of model-based free-viewpoint video of human actors.<br>Silhouette-based approaches have demonstrated that they are able to <br>accurately recover a large range of human motion from multi-view video.<br>However, they fail to make use of all available information, specifically that <br>of texture information. This paper presents an algorithm <br>that uses motion fields constructed from optical flow in multi-view video <br>sequences.<br><br>The use of motion fields augments the silhoutte-based method by incorporating <br>texture-information into the tracking process. <br>The algorithm is a key-component in a larger free-viewpoint video system of <br>human actors. <br>Our results demonstrate that our method accurately estimates pose parameters <br>and allows for realistic texture generation in 3D video sequences.
Export
BibTeX
@inproceedings{Theobalt-et-al_PG03,
  TITLE     = {Enhancing Silhouette-based Human Motion Capture with {3D} Motion Fields},
  AUTHOR    = {Theobalt, Christian and Carranza, Joel and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2028-6},
  DOI       = {10.1109/PCCGA.2003.1238260},
  LOCALID   = {Local-ID: C125675300671F7B-1602F8FA87A6464EC1256D640041B748-TheobaltPG2003},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {High-quality non-intrusive human motion capture is necessary for acquistion<br>of model-based free-viewpoint video of human actors.<br>Silhouette-based approaches have demonstrated that they are able to <br>accurately recover a large range of human motion from multi-view video.<br>However, they fail to make use of all available information, specifically that <br>of texture information. This paper presents an algorithm <br>that uses motion fields constructed from optical flow in multi-view video <br>sequences.<br><br>The use of motion fields augments the silhoutte-based method by incorporating <br>texture-information into the tracking process. <br>The algorithm is a key-component in a larger free-viewpoint video system of <br>human actors. <br>Our results demonstrate that our method accurately estimates pose parameters <br>and allows for realistic texture generation in 3D video sequences.},
  BOOKTITLE = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003)},
  EDITOR    = {Rokne, Jon and Klein, Reinhard and Wang, Wenping},
  PAGES     = {185--193},
  ADDRESS   = {Canmore, Canada},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Carranza, Joel %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Enhancing Silhouette-based Human Motion Capture with 3D Motion Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CDF-B %F EDOC: 201913 %F OTHER: Local-ID: C125675300671F7B-1602F8FA87A6464EC1256D640041B748-TheobaltPG2003 %R 10.1109/PCCGA.2003.1238260 %D 2003 %B 11th Pacific Conference on Computer Graphics and Applications %Z date of event: 2003-10-08 - 2003-10-10 %C Canmore, Canada %X High-quality non-intrusive human motion capture is necessary for acquistion<br>of model-based free-viewpoint video of human actors.<br>Silhouette-based approaches have demonstrated that they are able to <br>accurately recover a large range of human motion from multi-view video.<br>However, they fail to make use of all available information, specifically that <br>of texture information. This paper presents an algorithm <br>that uses motion fields constructed from optical flow in multi-view video <br>sequences.<br><br>The use of motion fields augments the silhoutte-based method by incorporating <br>texture-information into the tracking process. <br>The algorithm is a key-component in a larger free-viewpoint video system of <br>human actors. <br>Our results demonstrate that our method accurately estimates pose parameters <br>and allows for realistic texture generation in 3D video sequences. %B Proceedings of the 11th Pacific Conference on Computer Graphics and Applications %E Rokne, Jon; Klein, Reinhard; Wang, Wenping %P 185 - 193 %I IEEE %@ 0-7695-2028-6
Theobalt, C., Li, M., Magnor, M.A., and Seidel, H.-P. 2003b. A flexible and versatile studio for synchronized multi-view video recording. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In recent years, the convergence of Computer Vision and Computer Graphics has put forth new research areas that work on scene reconstruction from and analysis of multi-view video footage. In free-viewpoint video, for example, new views of a scene are generated from an arbitrary viewpoint in real-time from a set of real multi-view input video streams. The analysis of real-world scenes from multi-view video to extract motion information or reflection models is another field of research that greatly benefits from high-quality input data. Building a recording setup for multi-view video involves a great effort on the hardware as well as the software side. The amount of image data to be processed is huge, a decent lighting and camera setup is essential for a naturalistic scene appearance and robust background subtraction, and the computing infrastructure has to enable real-time processing of the recorded material. This paper describes the recording setup for multi-view video acquisition that enables the synchronized recording of dynamic scenes from multiple camera positions under controlled conditions. The requirements to the room and their implementation in the separate components of the studio are described in detail. The efficiency and flexibility of the room is demonstrated on the basis of the results that we obtain with a real-time 3D scene reconstruction system, a system for non-intrusive optical motion capture and a model-based free-viewpoint video system for human actors.
Export
BibTeX
@techreport{TheobaltMingMagnorSeidel2003,
  TITLE       = {A flexible and versatile studio for synchronized multi-view video recording},
  AUTHOR      = {Theobalt, Christian and Li, Ming and Magnor, Marcus A. and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2003-4-002},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2003},
  DATE        = {2003},
  ABSTRACT    = {In recent years, the convergence of Computer Vision and Computer Graphics has put forth new research areas that work on scene reconstruction from and analysis of multi-view video footage. In free-viewpoint video, for example, new views of a scene are generated from an arbitrary viewpoint in real-time from a set of real multi-view input video streams. The analysis of real-world scenes from multi-view video to extract motion information or reflection models is another field of research that greatly benefits from high-quality input data. Building a recording setup for multi-view video involves a great effort on the hardware as well as the software side. The amount of image data to be processed is huge, a decent lighting and camera setup is essential for a naturalistic scene appearance and robust background subtraction, and the computing infrastructure has to enable real-time processing of the recorded material. This paper describes the recording setup for multi-view video acquisition that enables the synchronized recording of dynamic scenes from multiple camera positions under controlled conditions. The requirements to the room and their implementation in the separate components of the studio are described in detail. The efficiency and flexibility of the room is demonstrated on the basis of the results that we obtain with a real-time 3D scene reconstruction system, a system for non-intrusive optical motion capture and a model-based free-viewpoint video system for human actors.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Theobalt, Christian %A Li, Ming %A Magnor, Marcus A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A flexible and versatile studio for synchronized multi-view video recording : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6AF2-6 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2003 %P 18 p. %X In recent years, the convergence of Computer Vision and Computer Graphics has put forth new research areas that work on scene reconstruction from and analysis of multi-view video footage. In free-viewpoint video, for example, new views of a scene are generated from an arbitrary viewpoint in real-time from a set of real multi-view input video streams. The analysis of real-world scenes from multi-view video to extract motion information or reflection models is another field of research that greatly benefits from high-quality input data. Building a recording setup for multi-view video involves a great effort on the hardware as well as the software side. The amount of image data to be processed is huge, a decent lighting and camera setup is essential for a naturalistic scene appearance and robust background subtraction, and the computing infrastructure has to enable real-time processing of the recorded material. This paper describes the recording setup for multi-view video acquisition that enables the synchronized recording of dynamic scenes from multiple camera positions under controlled conditions. The requirements to the room and their implementation in the separate components of the studio are described in detail. 
The efficiency and flexibility of the room is demonstrated on the basis of the results that we obtain with a real-time 3D scene reconstruction system, a system for non-intrusive optical motion capture and a model-based free-viewpoint video system for human actors. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Theobalt, C., Carranza, J., Magnor, M., and Seidel, H.-P. 2003c. A Parallel Framework for Silhouette-based Human Motion Capture. Vision, Modeling and Visualization 2003 (VMV 2003), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{Theobalt-et-al_VMV03,
  TITLE     = {A Parallel Framework for Silhouette-based Human Motion Capture},
  AUTHOR    = {Theobalt, Christian and Carranza, Joel and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-048-3},
  LOCALID   = {Local-ID: C125675300671F7B-37949DA71D14A98DC1256D8A004BEA22-TheobaltVMV2003},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Vision, Modeling and Visualization 2003 (VMV 2003)},
  EDITOR    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter and Steinbach, Eckehard and Westermann, R{\"u}diger},
  PAGES     = {207--214},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Carranza, Joel %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Parallel Framework for Silhouette-based Human Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C32-D %F EDOC: 201922 %F OTHER: Local-ID: C125675300671F7B-37949DA71D14A98DC1256D8A004BEA22-TheobaltVMV2003 %D 2003 %B International Fall Workshop on Vision, Modeling and Visualization 2003 %Z date of event: 2003-11-19 - 2003-11-21 %C Munich, Germany %B Vision, Modeling and Visualization 2003 %E Ertl, Thomas; Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter; Steinbach, Eckehard; Westermann, R&#252;diger %P 207 - 214 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-048-3
Theobalt, C., Li, M., Magnor, M.A., and Seidel, H.-P. 2003d. A Flexible and Versatile Studio for Synchronized Multi-view Video Recording. Vision, Video, and Graphics 2003, Eurographics.
Abstract
In recent years, the convergence of computer vision and computer graphics has <br>put forth <br>new research areas that work on scene reconstruction from and analysis of <br>multi-view video<br>footage. In free-viewpoint video, for example, new views of a scene are <br>generated from an arbitrary viewpoint <br>in real-time using a set of multi-view video streams as inputs. <br>The analysis of real-world scenes from multi-view video<br>to extract motion information or reflection models is another field of research <br>that <br>greatly benefits from high-quality input data. <br>Building a recording setup for multi-view video involves a great effort on the <br>hardware<br>as well as the software side. The amount of image data to be processed is huge,<br>a decent lighting and camera setup is essential for a naturalistic scene <br>appearance and <br>robust background subtraction, and the computing infrastructure has to enable <br>real-time processing of the recorded material.<br>This paper describes our recording setup for multi-view video acquisition that <br>enables the <br>synchronized recording <br>of dynamic scenes from multiple camera positions under controlled conditions. <br>The requirements<br>to the room and their implementation in the separate components of the studio <br>are described in detail.<br>The efficiency and flexibility of the room is demonstrated on the basis of the <br>results<br>that we obtain with a real-time 3D scene reconstruction system, a system for <br>non-intrusive optical <br>motion capture and a model-based free-viewpoint video system for human actors.
Export
BibTeX
@inproceedings{Theobalt-et-al_VVG03,
  TITLE     = {A Flexible and Versatile Studio for Synchronized Multi-view Video Recording},
  AUTHOR    = {Theobalt, Christian and Li, Ming and Magnor, Marcus A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-54-1},
  DOI       = {10.2312/vvg.20031002},
  LOCALID   = {Local-ID: C1256BDE005F57A8-30E4AD7EA98533D1C1256E14003CECFA-Theobalt:2003:FVS},
  PUBLISHER = {Eurographics},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {In recent years, the convergence of computer vision and computer graphics has <br>put forth <br>new research areas that work on scene reconstruction from and analysis of <br>multi-view video<br>footage. In free-viewpoint video, for example, new views of a scene are <br>generated from an arbitrary viewpoint <br>in real-time using a set of multi-view video streams as inputs. <br>The analysis of real-world scenes from multi-view video<br>to extract motion information or reflection models is another field of research <br>that <br>greatly benefits from high-quality input data. <br>Building a recording setup for multi-view video involves a great effort on the <br>hardware<br>as well as the software side. The amount of image data to be processed is huge,<br>a decent lighting and camera setup is essential for a naturalistic scene <br>appearance and <br>robust background subtraction, and the computing infrastructure has to enable <br>real-time processing of the recorded material.<br>This paper describes our recording setup for multi-view video acquisition that <br>enables the <br>synchronized recording <br>of dynamic scenes from multiple camera positions under controlled conditions. <br>The requirements<br>to the room and their implementation in the separate components of the studio <br>are described in detail.<br>The efficiency and flexibility of the room is demonstrated on the basis of the <br>results<br>that we obtain with a real-time 3D scene reconstruction system, a system for <br>non-intrusive optical <br>motion capture and a model-based free-viewpoint video system for human actors.},
  BOOKTITLE = {Vision, Video, and Graphics 2003},
  EDITOR    = {Hall, Peter and Willis, Philip},
  PAGES     = {9--16},
  ADDRESS   = {Bath, UK},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Li, Ming %A Magnor, Marcus A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Flexible and Versatile Studio for Synchronized Multi-view Video Recording : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BFD-F %F EDOC: 201856 %F OTHER: Local-ID: C1256BDE005F57A8-30E4AD7EA98533D1C1256E14003CECFA-Theobalt:2003:FVS %R 10.2312/vvg.20031002 %D 2003 %B VVG 2003 %Z date of event: 2003-07-10 - 2003-07-11 %C Bath, UK %X In recent years, the convergence of computer vision and computer graphics has <br>put forth <br>new research areas that work on scene reconstruction from and analysis of <br>multi-view video<br>footage. In free-viewpoint video, for example, new views of a scene are <br>generated from an arbitrary viewpoint <br>in real-time using a set of multi-view video streams as inputs. <br>The analysis of real-world scenes from multi-view video<br>to extract motion information or reflection models is another field of research <br>that <br>greatly benefits from high-quality input data. <br>Building a recording setup for multi-view video involves a great effort on the <br>hardware<br>as well as the software side. The amount of image data to be processed is huge,<br>a decent lighting and camera setup is essential for a naturalistic scene <br>appearance and <br>robust background subtraction, and the computing infrastructure has to enable <br>real-time processing of the recorded material.<br>This paper describes our recording setup for multi-view video acquisition that <br>enables the <br>synchronized recording <br>of dynamic scenes from multiple camera positions under controlled conditions. 
<br>The requirements<br>to the room and their implementation in the separate components of the studio <br>are described in detail.<br>The efficiency and flexibility of the room is demonstrated on the basis of the <br>results<br>that we obtain with a real-time 3D scene reconstruction system, a system for <br>non-intrusive optical <br>motion capture and a model-based free-viewpoint video system for human actors. %B Vision, Video, and Graphics 2003 %E Hall, Peter; Willis, Philip %P 9 - 16 %I Eurographics %@ 3-905673-54-1
Theisel, H., Rössl, C., and Seidel, H.-P. 2003a. Using Feature Flow Fields for Topological Comparison of Vector Fields. Vision, Modeling and Visualization 2003 (VMV 03), Akademische Verlagsgesellschaft Aka.
Abstract
In this paper we propose a new topology based metric for 2D vector<br>fields. This metric is based on the concept of feature flow<br>fields. We show that it incorporates both the characteristics and<br>the local distribution of the critical points while keeping the<br>computing time reasonably small even for topologically complex<br>vector fields. Finally, we apply the metric to track the<br>topological behavior in a time-dependent vector field, and to<br>evaluate a smoothing procedure on a noisy steady vector field.
Export
BibTeX
@inproceedings{Theisel2003_vmv,
  TITLE     = {Using Feature Flow Fields for Topological Comparison of Vector Fields},
  AUTHOR    = {Theisel, Holger and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-048-3},
  LOCALID   = {Local-ID: C125675300671F7B-AF66E5B69EBC9F31C1256D79006C4D60-Theisel2003_vmv},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {In this paper we propose a new topology based metric for 2D vector<br>fields. This metric is based on the concept of feature flow<br>fields. We show that it incorporates both the characteristics and<br>the local distribution of the critical points while keeping the<br>computing time reasonably small even for topologically complex<br>vector fields. Finally, we apply the metric to track the<br>topological behavior in a time-dependent vector field, and to<br>evaluate a smoothing procedure on a noisy steady vector field.},
  BOOKTITLE = {Vision, Modeling and Visualization 2003 (VMV 03)},
  EDITOR    = {Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter and Ertl, Thomas and Steinbach, Eckehard and Westermann, R{\"u}diger},
  PAGES     = {521--528},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Feature Flow Fields for Topological Comparison of Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E7F-6 %F EDOC: 201949 %F OTHER: Local-ID: C125675300671F7B-AF66E5B69EBC9F31C1256D79006C4D60-Theisel2003_vmv %D 2003 %B International Fall Workshop on Vision, Modeling and Visualization 2003 %Z date of event: 2003-11-19 - 2003-11-21 %C Munich, Germany %X In this paper we propose a new topology based metric for 2D vector<br>fields. This metric is based on the concept of feature flow<br>fields. We show that it incorporates both the characteristics and<br>the local distribution of the critical points while keeping the<br>computing time reasonably small even for topologically complex<br>vector fields. Finally, we apply the metric to track the<br>topological behavior in a time-dependent vector field, and to<br>evaluate a smoothing procedure on a noisy steady vector field. %B Vision, Modeling and Visualization 2003 %E Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter; Ertl, Thomas; Steinbach, Eckehard; Westermann, R&#252;diger %P 521 - 528 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-048-3
Theisel, H., Rössl, C., and Seidel, H.-P. 2003b. Compression of 2D Vector Fields under Guaranteed Topology Preservation. Computer Graphics Forum22, 3.
Abstract
In this paper we introduce a new compression technique for 2D vector fields <br>which preserves the complete topology, i.e., the critical points and the <br>connectivity of the separatrices. As the theoretical foundation of the <br>algorithm, we show in a theorem that for local modifications of a vector field, <br>it is possible to decide entirely by a local analysis whether or not the global <br>topology is preserved. This result is applied in a compression algorithm which <br>is based on a repeated local modification of the vector field - namely a <br>repeated edge collapse of the underlying piecewise linear domain. We apply the <br>compression technique to a number of data sets with a complex topology and <br>obtain significantly improved compression ratios in comparison to pre-existing <br>topology-preserving techniques.
Export
BibTeX
@article{Theisel-et-al_EUROGRAPHICS03,
  TITLE     = {Compression of {2D} Vector Fields under Guaranteed Topology Preservation},
  AUTHOR    = {Theisel, Holger and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/1467-8659.00680},
  LOCALID   = {Local-ID: C125675300671F7B-21FE90C7C981A5E3C1256D050047DB4B-Theisel:2003:CVFGTP},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {In this paper we introduce a new compression technique for 2D vector fields <br>which preserves the complete topology, i.e., the critical points and the <br>connectivity of the separatrices. As the theoretical foundation of the <br>algorithm, we show in a theorem that for local modifications of a vector field, <br>it is possible to decide entirely by a local analysis whether or not the global <br>topology is preserved. This result is applied in a compression algorithm which <br>is based on a repeated local modification of the vector field -- namely a <br>repeated edge collapse of the underlying piecewise linear domain. We apply the <br>compression technique to a number of data sets with a complex topology and <br>obtain significantly improved compression ratios in comparison to pre-existing <br>topology-preserving techniques.},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {22},
  NUMBER    = {3},
  PAGES     = {333--342},
  BOOKTITLE = {EUROGRAPHICS 2003},
  EDITOR    = {Brunet, Pere and Fellner, Dieter W.},
}
Endnote
%0 Journal Article %A Theisel, Holger %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Compression of 2D Vector Fields under Guaranteed Topology Preservation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C98-B %F EDOC: 202046 %F OTHER: Local-ID: C125675300671F7B-21FE90C7C981A5E3C1256D050047DB4B-Theisel:2003:CVFGTP %R 10.1111/1467-8659.00680 %D 2003 %X In this paper we introduce a new compression technique for 2D vector fields <br>which preserves the complete topology, i.e., the critical points and the <br>connectivity of the separatrices. As the theoretical foundation of the <br>algorithm, we show in a theorem that for local modifications of a vector field, <br>it is possible to decide entirely by a local analysis whether or not the global <br>topology is preserved. This result is applied in a compression algorithm which <br>is based on a repeated local modification of the vector field - namely a <br>repeated edge collapse of the underlying piecewise linear domain. We apply the <br>compression technique to a number of data sets with a complex topology and <br>obtain significantly improved compression ratios in comparison to pre-existing <br>topology-preserving techniques. %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 22 %N 3 %& 333 %P 333 - 342 %I Blackwell-Wiley %C Oxford %@ false %B EUROGRAPHICS 2003 %I Blackwell %C Oxford, UK
Theisel, H., Rössl, C., and Seidel, H.-P. 2003c. Combining Topological Simplification and Topology Preserving Compression for 2D Vector Fields. Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003), IEEE.
Abstract
Topological simplification techniques and topology preserving<br>compression approaches for 2D vector fields have<br>been developed quite independently of each other. In this<br>paper we propose a combination of both approaches: a vector<br>field should be compressed in such a way that its important<br>topological features (both critical points and separatrices)<br>are preserved while its unimportant features are allowed<br>to collapse and disappear. To do so, a number of new<br>solutions and modifications of pre-existing algorithms are<br>presented. We apply the approach to a flow data set which<br>is both large and topologically complex, and achieve significant<br>compression ratios there.
Export
BibTeX
@inproceedings{Theisel-et-al_PG03,
  TITLE     = {Combining Topological Simplification and Topology Preserving Compression for {2D} Vector Fields},
  AUTHOR    = {Theisel, Holger and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2028-6},
  DOI       = {10.1109/PCCGA.2003.1238287},
  LOCALID   = {Local-ID: C125675300671F7B-788BE51D4011CCC5C1256D64005C4F4A-TheiselPG2003},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Topological simplification techniques and topology preserving<br>compression approaches for 2D vector fields have<br>been developed quite independently of each other. In this<br>paper we propose a combination of both approaches: a vector<br>field should be compressed in such a way that its important<br>topological features (both critical points and separatrices)<br>are preserved while its unimportant features are allowed<br>to collapse and disappear. To do so, a number of new<br>solutions and modifications of pre-existing algorithms are<br>presented. We apply the approach to a flow data set which<br>is both large and topologically complex, and achieve significant<br>compression ratios there.},
  BOOKTITLE = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003)},
  EDITOR    = {Rokne, Jon and Klein, Reinhard and Wang, Wenping},
  PAGES     = {419--423},
  ADDRESS   = {Canmore, Canada},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combining Topological Simplification and Topology Preserving Compression for 2D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C8C-7 %F EDOC: 201973 %F OTHER: Local-ID: C125675300671F7B-788BE51D4011CCC5C1256D64005C4F4A-TheiselPG2003 %R 10.1109/PCCGA.2003.1238287 %D 2003 %B 11th Pacific Conference on Computer Graphics and Applications %Z date of event: 2003-10-08 - 2003-10-10 %C Canmore, Canada %X Topological simplification techniques and topology preserving<br>compression approaches for 2D vector fields have<br>been developed quite independently of each other. In this<br>paper we propose a combination of both approaches: a vector<br>field should be compressed in such a way that its important<br>topological features (both critical points and separatrices)<br>are preserved while its unimportant features are allowed<br>to collapse and disappear. To do so, a number of new<br>solutions and modifications of pre-existing algorithms are<br>presented. We apply the approach to a flow data set which,<br>is both large and topologically complex, and achieve significant<br>compression ratios there. %B Proceedings of the 11th Pacific Conference on Computer Graphics and Applications %E Rokne, Jon; Klein, Reinhard; Wang, Wenping %P 419 - 423 %I IEEE %@ 0-7695-2028-6
Theisel, H., Weinkauf, T., Hege, H.-C., and Seidel, H.-P. 2003d. Saddle Connectors - An Approach to Visualizing the Topological Skeleton of Complex 3D Vector Fields. IEEE Visualization 2003, IEEE.
Export
BibTeX
@inproceedings{Theisel-et-el_VIS03,
  TITLE     = {Saddle Connectors -- An Approach to Visualizing the Topological Skeleton of Complex {3D} Vector Fields},
  AUTHOR    = {Theisel, Holger and Weinkauf, Tino and Hege, Hans-Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7803-8120-3},
  DOI       = {10.1109/VISUAL.2003.1250376},
  LOCALID   = {Local-ID: C125675300671F7B-9844E96616128021C1256D3F0065F548-Theisel2003c},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {IEEE Visualization 2003},
  EDITOR    = {Turk, Greg and van Wijk, Jarke and Moorhead, Robert},
  PAGES     = {225--232},
  ADDRESS   = {Seattle, USA},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A Weinkauf, Tino %A Hege, Hans-Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Saddle Connectors - An Approach to Visualizing the Topological Skeleton of Complex 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2DF9-7 %F EDOC: 201858 %F OTHER: Local-ID: C125675300671F7B-9844E96616128021C1256D3F0065F548-Theisel2003c %R 10.1109/VISUAL.2003.1250376 %D 2003 %B IEEE Conference on Visualization 2003 %Z date of event: 2003-10-19 - 2003-10-24 %C Seattle, USA %B IEEE Visualization 2003 %E Turk, Greg; van Wijk, Jarke; Moorhead, Robert %P 225 - 232 %I IEEE %@ 0-7803-8120-3
Theisel, H. and Seidel, H.-P. 2003. Feature Flow Fields. Data Visualisation 2003 (VisSym-03) : Joint Eurographics / IEEE TCVG Symposium on Visualization, Eurographics.
Export
BibTeX
@inproceedings{Theisel-Seidel_VisSym03,
  TITLE     = {Feature Flow Fields},
  AUTHOR    = {Theisel, Holger and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-698-2},
  DOI       = {10.2312/VisSym/VisSym03/141-148},
  LOCALID   = {Local-ID: C125675300671F7B-2B84E27FB07E2F3CC1256CE90041DD37-Theisel2003a},
  PUBLISHER = {Eurographics},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Data Visualisation 2003 (VisSym-03) : Joint Eurographics / IEEE TCVG Symposium on Visualization},
  EDITOR    = {Bonneau, Georges-Pierre and Hahmann, Stefanie and Hansen, Charles},
  PAGES     = {141--148},
  ADDRESS   = {Grenoble, France},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Flow Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D06-9 %F EDOC: 201838 %F OTHER: Local-ID: C125675300671F7B-2B84E27FB07E2F3CC1256CE90041DD37-Theisel2003a %R 10.2312/VisSym/VisSym03/141-148 %D 2003 %B Joint EUROGRAPHICS - IEEE TCVG Symposium on Visualization %Z date of event: 2003-05-26 - 2003-05-28 %C Grenoble, France %B Data Visualisation 2003 (VisSym-03) : Joint Eurographics / IEEE TCVG Symposium on Visualization %E Bonneau, Georges-Pierre; Hahmann, Stefanie; Hansen, Charles %P 141 - 148 %I Eurographics %@ 978-1-58113-698-2
Tarini, M., Lensch, H.P.A., Gösele, M., and Seidel, H.-P. 2003. 3D acquisition of mirroring objects. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Objects with mirroring optical characteristics are left out of the scope of most 3D scanning methods. We present here a new automatic acquisition approach, shape-from-distortion, that focuses on that category of objects, requires only a still camera and a color monitor, and produces range scans (plus a normal and a reflectance map) of the target. Our technique consists of two steps: first, an improved environment matte is captured for the mirroring object, using the interference of patterns with different frequencies in order to obtain sub-pixel accuracy. Then, the matte is converted into a normal and a depth map by exploiting the self coherence of a surface when integrating the normal map along different paths. The results show very high accuracy, capturing even smallest surface details. The acquired depth maps can be further processed using standard techniques to produce a complete 3D mesh of the object.
Export
BibTeX
@techreport{TariniLenschGoeseleSeidel2003,
  TITLE       = {{3D} acquisition of mirroring objects},
  AUTHOR      = {Tarini, Marco and Lensch, Hendrik P. A. and G{\"o}sele, Michael and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  NUMBER      = {MPI-I-2003-4-001},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2003},
  DATE        = {2003},
  ABSTRACT    = {Objects with mirroring optical characteristics are left out of the scope of most 3D scanning methods. We present here a new automatic acquisition approach, shape-from-distortion, that focuses on that category of objects, requires only a still camera and a color monitor, and produces range scans (plus a normal and a reflectance map) of the target. Our technique consists of two steps: first, an improved environment matte is captured for the mirroring object, using the interference of patterns with different frequencies in order to obtain sub-pixel accuracy. Then, the matte is converted into a normal and a depth map by exploiting the self coherence of a surface when integrating the normal map along different paths. The results show very high accuracy, capturing even smallest surface details. The acquired depth maps can be further processed using standard techniques to produce a complete 3D mesh of the object.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Tarini, Marco %A Lensch, Hendrik P. A. %A G&#246;sele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D acquisition of mirroring objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6AF5-F %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2003 %P 37 p. %X Objects with mirroring optical characteristics are left out of the scope of most 3D scanning methods. We present here a new automatic acquisition approach, shape-from-distortion, that focuses on that category of objects, requires only a still camera and a color monitor, and produces range scans (plus a normal and a reflectance map) of the target. Our technique consists of two steps: first, an improved environment matte is captured for the mirroring object, using the interference of patterns with different frequencies in order to obtain sub-pixel accuracy. Then, the matte is converted into a normal and a depth map by exploiting the self coherence of a surface when integrating the normal map along different paths. The results show very high accuracy, capturing even smallest surface details. The acquired depth maps can be further processed using standard techniques to produce a complete 3D mesh of the object. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Rössl, C., Zeilfelder, F., Nürnberger, G., and Seidel, H.-P. 2003a. Visualization of volume data with quadratic super splines. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We develop a new approach to reconstruct non-discrete models from gridded volume samples. As a model, we use quadratic, trivariate super splines on a uniform tetrahedral partition $\Delta$. The approximating splines are determined in a natural and completely symmetric way by averaging local data samples such that appropriate smoothness conditions are automatically satisfied. On each tetrahedron of $\Delta$, the spline is a polynomial of total degree two which provides several advantages including the efficient computation, evaluation and visualization of the model. We apply Bernstein-Bézier techniques well-known in Computer Aided Geometric Design to compute and evaluate the trivariate spline and its gradient. With this approach the volume data can be visualized efficiently e.g. with isosurface ray-casting. Along an arbitrary ray the splines are univariate, piecewise quadratics and thus the exact intersection for a prescribed isovalue can be easily determined in an analytic and exact way. Our results confirm the efficiency of the method and demonstrate a high visual quality for rendered isosurfaces.
Export
BibTeX
@techreport{RoesslZeilfelderNurnbergerSeidel2003,
  TITLE       = {Visualization of volume data with quadratic super splines},
  AUTHOR      = {R{\"o}ssl, Christian and Zeilfelder, Frank and N{\"u}rnberger, G{\"u}nther and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2004-4-006},
  NUMBER      = {MPI-I-2004-4-006},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2003},
  DATE        = {2003},
  ABSTRACT    = {We develop a new approach to reconstruct non-discrete models from gridded volume samples. As a model, we use quadratic, trivariate super splines on a uniform tetrahedral partition $\Delta$. The approximating splines are determined in a natural and completely symmetric way by averaging local data samples such that appropriate smoothness conditions are automatically satisfied. On each tetrahedron of $\Delta$, the spline is a polynomial of total degree two which provides several advantages including the efficient computation, evaluation and visualization of the model. We apply Bernstein-B{\'e}zier techniques well-known in Computer Aided Geometric Design to compute and evaluate the trivariate spline and its gradient. With this approach the volume data can be visualized efficiently e.g. with isosurface ray-casting. Along an arbitrary ray the splines are univariate, piecewise quadratics and thus the exact intersection for a prescribed isovalue can be easily determined in an analytic and exact way. Our results confirm the efficiency of the method and demonstrate a high visual quality for rendered isosurfaces.},
  TYPE        = {Research Report},
}
Endnote
%0 Report %A R&#246;ssl, Christian %A Zeilfelder, Frank %A N&#252;rnberger, G&#252;nther %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Visualization of volume data with quadratic super splines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6AE8-D %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2004-4-006 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2003 %P 15 p. %X We develop a new approach to reconstruct non-discrete models from gridded volume samples. As a model, we use quadratic, trivariate super splines on a uniform tetrahedral partition $\Delta$. The approximating splines are determined in a natural and completely symmetric way by averaging local data samples such that appropriate smoothness conditions are automatically satisfied. On each tetrahedron of $\Delta$ , the spline is a polynomial of total degree two which provides several advantages including the e cient computation, evaluation and visualization of the model. We apply Bernstein-B{\&#180;e}zier techniques wellknown in Computer Aided Geometric Design to compute and evaluate the trivariate spline and its gradient. With this approach the volume data can be visualized e ciently e.g. with isosurface ray-casting. Along an arbitrary ray the splines are univariate, piecewise quadratics and thus the exact intersection for a prescribed isovalue can be easily determined in an analytic and exact way. Our results confirm the e ciency of the method and demonstrate a high visual quality for rendered isosurfaces. %B Research Report
Rössl, C., Ivrissimtzis, I., and Seidel, H.-P. 2003b. Tree-based Triangle Mesh Connectivity Encoding. Curve and Surface Fitting: Saint-Malo 2002, Nashboro Press.
Abstract
We present a divide and conquer algorithm for triangle mesh connectivity encoding. As the algorithm traverses the mesh it constructs a weighted binary tree that holds all information required for reconstruction. This representation can be used for compression. We derive a new iterative single-pass decoding algorithm, and we show how to exploit the tree data structure for generating stripifications for efficient rendering that come with a guaranteed cost saving.
Export
BibTeX
@inproceedings{Roessl:TBTMCE:2002,
  TITLE     = {Tree-based Triangle Mesh Connectivity Encoding},
  AUTHOR    = {R{\"o}ssl, Christian and Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  EDITOR    = {Cohen, Albert and Merrien, Jean-Louis and Schumaker, Larry L.},
  LANGUAGE  = {eng},
  ISBN      = {0-9728482-1-5},
  LOCALID   = {Local-ID: C125675300671F7B-892629445F84CDE3C1256D1100313849-Roessl:TBTMCE:2002},
  PUBLISHER = {Nashboro Press},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {We present a divide and conquer algorithm for triangle mesh connectivity encoding. As the algorithm traverses the mesh it constructs a weighted binary tree that holds all information required for reconstruction. This representation can be used for compression. We derive a new iterative single-pass decoding algorithm, and we show how to exploit the tree data structure for generating stripifications for efficient rendering that come with a guaranteed cost saving.},
  BOOKTITLE = {Curve and Surface Fitting: Saint-Malo 2002},
  PAGES     = {345--354},
  ADDRESS   = {Saint Malo, France},
}
Endnote
%0 Conference Proceedings %A R&#246;ssl, Christian %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %E Cohen, Albert %E Merrien, Jean-Louis %E Schumaker, Larry L. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Max Planck Society Max Planck Society %T Tree-based Triangle Mesh Connectivity Encoding : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E74-B %F EDOC: 201952 %F OTHER: Local-ID: C125675300671F7B-892629445F84CDE3C1256D1100313849-Roessl:TBTMCE:2002 %D 2003 %B Curve and Surface Fitting 2002 %Z date of event: 2002-10-09 - 2002-10-11 %C Saint Malo, France %X We present a divide and conquer algorithm for triangle mesh connectivity encoding. As the algorithm traverses the mesh it constructs a weighted binary tree that holds all information required for reconstruction. This representation can be used for compression.We derive a new iterative single-pass decoding algorithm, and we show how to exploit the tree data structure for generating stripifications for efficient rendering that come with a guaranteed cost saving. %B Curve and Surface Fitting: Saint-Malo 2002 %P 345 - 354 %I Nashboro Press %@ 0-9728482-1-5
Rössl, C., Zeilfelder, F., Nürnberger, G., and Seidel, H.-P. 2003c. Visualization of Volume Data with Quadratic Super Splines. IEEE Visualization 2003, IEEE.
Abstract
We develop a new approach to reconstruct non-discrete models from gridded volume samples. As a model, we use quadratic trivariate super splines on a uniform tetrahedral partition Δ. The approximating splines are determined in a natural and completely symmetric way by averaging local data samples, such that appropriate smoothness conditions are automatically satisfied. On each tetrahedron of Δ, the quasi-interpolating spline is a polynomial of total degree two which provides several advantages including efficient computation, evaluation and visualization of the model. We apply Bernstein-Bézier techniques well-known in CAGD to compute and evaluate the trivariate spline and its gradient. With this approach the volume data can be visualized efficiently e.g. with isosurface raycasting. Along an arbitrary ray the splines are univariate, piecewise quadratics and thus the exact intersection for a prescribed isovalue can be easily determined in an analytic and exact way. Our results confirm the efficiency of the quasi-interpolating method and demonstrate high visual quality for rendered isosurfaces.
Export
BibTeX
@inproceedings{Rossl-et-al_VIS03,
  TITLE     = {Visualization of Volume Data with Quadratic Super Splines},
  AUTHOR    = {R{\"o}ssl, Christian and Zeilfelder, Frank and N{\"u}rnberger, G{\"u}nther and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7803-8120-3},
  DOI       = {10.1109/VISUAL.2003.1250399},
  LOCALID   = {Local-ID: C125675300671F7B-667BB45D09712520C1256D41005222E9-RoesslZeilfelderNuernbergerSeidel2003},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {We develop a new approach to reconstruct non-discrete models from gridded volume samples. As a model, we use quadratic trivariate super splines on a uniform tetrahedral partition $\Delta$. The approximating splines are determined in a natural and completely symmetric way by averaging local data samples, such that appropriate smoothness conditions are automatically satisfied. On each tetrahedron of $\Delta$, the quasi-interpolating spline is a polynomial of total degree two which provides several advantages including efficient computation, evaluation and visualization of the model. We apply Bernstein-B{\'e}zier techniques well-known in CAGD to compute and evaluate the trivariate spline and its gradient. With this approach the volume data can be visualized efficiently e.g. with isosurface raycasting. Along an arbitrary ray the splines are univariate, piecewise quadratics and thus the exact intersection for a prescribed isovalue can be easily determined in an analytic and exact way. Our results confirm the efficiency of the quasi-interpolating method and demonstrate high visual quality for rendered isosurfaces.},
  BOOKTITLE = {IEEE Visualization 2003},
  EDITOR    = {Turk, Greg and van Wijk, Jarke and Moorhead, Robert},
  PAGES     = {393--400},
  ADDRESS   = {Seattle, WA, USA},
}
Endnote
%0 Conference Proceedings %A R&#246;ssl, Christian %A Zeilfelder, Frank %A N&#252;rnberger, G&#252;nther %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Visualization of Volume Data with Quadratic Super Splines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E8B-8 %F EDOC: 201839 %F OTHER: Local-ID: C125675300671F7B-667BB45D09712520C1256D41005222E9-RoesslZeilfelderNuernbergerSeidel2003 %R 10.1109/VISUAL.2003.1250399 %D 2003 %B IEEE Conference on Visualization 2003 %Z date of event: 2003-10-19 - 2003-10-24 %C Seattle, WA, USA %X We develop a new approach to reconstruct non-discrete models from<br>gridded volume samples. As a model, we use quadratic trivariate super<br>splines on a uniform tetrahedral partition . The approximating splines<br>are determined in a natural and completely symmetric way by averaging<br>local data samples, such that appropriate smoothness conditions are<br>automatically satisfied. On each tetrahedron of , the<br>quasi-interpolating spline is a polynomial of total degree two which<br>provides several advantages including efficient computation,<br>evaluation and visualization of the model. We apply Bernstein-B&#180;ezier<br>techniques well-known in CAGD to compute and evaluate the trivariate<br>spline and its gradient. With this approach the volume data can be<br>visualized efficiently e.g. with isosurface raycasting. Along an<br>arbitrary ray the splines are univariate, piecewise quadratics and<br>thus the exact intersection for a prescribed isovalue can be easily<br>determined in an analytic and exact way. Our results confirm the<br>efficiency of the quasi-interpolating method and demonstrate high<br>visual quality for rendered isosurfaces. %B IEEE Visualization 2003 %E Turk, Greg; van Wijk, Jarke; Moorhead, Robert %P 393 - 400 %I IEEE %@ 0-7803-8120-3
Oi, R., Magnor, M., and Aizawa, K. 2003. A Solid-State, Simultaneous Wide Angle-Detailed View Video Surveillance Camera. Vision, Modeling and Visualization 2003 (VMV-03) : proceedings, Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{Oi:2003:SSS,
  TITLE     = {A Solid-State, Simultaneous Wide Angle-Detailed View Video Surveillance Camera},
  AUTHOR    = {Oi, R. and Magnor, Marcus and Aizawa, K.},
  EDITOR    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter and Steinbach, Eckehard and Westermann, R{\"u}diger},
  LANGUAGE  = {eng},
  ISBN      = {3-89839-048-3},
  LOCALID   = {Local-ID: C1256BDE005F57A8-4260532A3C871E6BC1256E140039C09D-Oi:2003:SSS},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Vision, Modeling and Visualization 2003 (VMV-03) : proceedings},
  PAGES     = {329--336},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Oi, R. %A Magnor, Marcus %A Aizawa, K. %E Ertl, Thomas %E Girod, Bernd %E Greiner, G&#252;nther %E Niemann, Heinrich %E Seidel, Hans-Peter %E Steinbach, Eckehard %E Westermann, R&#252;diger %+ Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Solid-State, Simultaneous Wide Angle-Detailed View Video Surveillance Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C48-0 %F EDOC: 202042 %F OTHER: Local-ID: C1256BDE005F57A8-4260532A3C871E6BC1256E140039C09D-Oi:2003:SSS %D 2003 %B VMV 2003 %Z date of event: 2003-11-19 - 2003-11-21 %C Munich, Germany %B Vision, Modeling and Visualization 2003 (VMV-03) : proceedings %P 329 - 336 %I Akademische Verlagsgesellschaft Aka %@ 3-89839-048-3
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2003a. Interpolatory Subdivision Curves via Diffusion of Normals. Proceedings of the 2003 Computer Graphics International (CGI 2003), IEEE Computer Society.
Export
BibTeX
@inproceedings{Ohtake-et-al_CGI03,
  TITLE     = {Interpolatory Subdivision Curves via Diffusion of Normals},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1946-6},
  DOI       = {10.1109/CGI.2003.1214443},
  LOCALID   = {Local-ID: C125675300671F7B-189D01FD6DD9936EC1256CF3006C2D08-cgi03obs},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Proceedings of the 2003 Computer Graphics International (CGI 2003)},
  PAGES     = {22--27},
  ADDRESS   = {Tokyo, Japan},
}
Endnote
%0 Conference Proceedings %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interpolatory Subdivision Curves via Diffusion of Normals : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D52-0 %F EDOC: 201992 %F OTHER: Local-ID: C125675300671F7B-189D01FD6DD9936EC1256CF3006C2D08-cgi03obs %R 10.1109/CGI.2003.1214443 %D 2003 %B Computer Graphics International 2003 %Z date of event: 2003-07-09 - 2003-07-11 %C Tokyo, Japan %B Proceedings of the 2003 Computer Graphics International %P 22 - 27 %I IEEE Computer Society %@ 0-7695-1946-6
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2003b. A Multi-scale Approach to 3D Scattered Data Interpolation with Compactly Supported Basis Functions. Shape Modeling International 2003 (SMI 2003), IEEE.
Abstract
In this paper, we propose a hierarchical approach to 3D scattered data interpolation with compactly supported basis functions. Our numerical experiments suggest that the approach integrates the best aspects of scattered data fitting with locally and globally supported basis functions. Employing locally supported functions leads to an efficient computational procedure, while a coarse-to-fine hierarchy makes our method insensitive to the density of scattered data and allows us to restore large parts of missed data. Given a point cloud distributed along a surface, we first use spatial down sampling to construct a coarse-to-fine hierarchy of point sets. Then we interpolate the sets starting from the coarsest level. We interpolate a point set of the hierarchy, as an offsetting of the interpolating function computed at the previous level. Fig.\,\ref{risu_multi} shows an original point set (the leftmost image) and its coarse-to-fine hierarchy of interpolated sets. According to our numerical experiments, the method is essentially faster than the state-of-art scattered data approximation with globally supported RBFs \cite{rbf} and much simpler to implement.
Export
BibTeX
@inproceedings{Ohtake-et-al_SMI03,
  TITLE     = {A Multi-scale Approach to {3D} Scattered Data Interpolation with Compactly Supported Basis Functions},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-7695-1909-8},
  DOI       = {10.1109/SMI.2003.1199611},
  LOCALID   = {Local-ID: C125675300671F7B-87B4CF5FB4E9EE9AC1256CB7006C0EC9-smi03obs},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {In this paper, we propose a hierarchical approach to 3D scattered data interpolation with compactly supported basis functions. Our numerical experiments suggest that the approach integrates the best aspects of scattered data fitting with locally and globally supported basis functions. Employing locally supported functions leads to an efficient computational procedure, while a coarse-to-fine hierarchy makes our method insensitive to the density of scattered data and allows us to restore large parts of missed data. Given a point cloud distributed along a surface, we first use spatial down sampling to construct a coarse-to-fine hierarchy of point sets. Then we interpolate the sets starting from the coarsest level. We interpolate a point set of the hierarchy, as an offsetting of the interpolating function computed at the previous level. Fig.\,\ref{risu_multi} shows an original point set (the leftmost image) and its coarse-to-fine hierarchy of interpolated sets. According to our numerical experiments, the method is essentially faster than the state-of-art scattered data approximation with globally supported RBFs \cite{rbf} and much simpler to implement.},
  BOOKTITLE = {Shape Modeling International 2003 (SMI 2003)},
  EDITOR    = {Kim, Myung-Soo},
  PAGES     = {153--161},
  ADDRESS   = {Seoul, Korea},
}
Endnote
%0 Conference Proceedings %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Multi-scale Approach to 3D Scattered Data Interpolation with Compactly Supported Basis Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C18-9 %F EDOC: 201818 %F OTHER: Local-ID: C125675300671F7B-87B4CF5FB4E9EE9AC1256CB7006C0EC9-smi03obs %R 10.1109/SMI.2003.1199611 %D 2003 %B 2003 International Conference in Shape Modeling %Z date of event: 2003-05-12 - 2003-05-16 %C Seoul, Korea %X In this paper, we propose a hierarchical approach to 3D scattered <br>data interpolation with compactly supported basis functions.<br>Our numerical experiments suggest that the approach integrates <br>the best aspects of scattered data fitting with locally and globally <br>supported basis functions. Employing locally supported functions leads<br>to an efficient computational procedure, while a coarse-to-fine <br>hierarchy makes our method insensitive to the density of <br>scattered data and allows us to restore large parts of <br>missed data. <br><br>Given a point cloud distributed along a surface, we first use<br>spatial down sampling to construct a coarse-to-fine hierarchy <br>of point sets. Then we interpolate the sets starting from the <br>coarsest level. We interpolate a point set of the hierarchy,<br>as an offsetting of the interpolating function computed at <br>the previous level. Fig.\,\ref{risu_multi} shows an original<br>point set (the leftmost image) and its coarse-to-fine hierarchy <br>of interpolated sets.<br><br>According to our numerical experiments, the method <br>is essentially faster than the state-of-art scattered data <br>approximation with globally supported RBFs \cite{rbf}<br>and much simpler to implement. 
%B Shape Modeling International 2003 %E Kim, Myung-Soo %P 153 - 161 %I IEEE %@ 978-0-7695-1909-8
Ohtake, Y., Belyaev, A., Alexa, M., Turk, G., and Seidel, H.-P. 2003c. Multi-level Partition of Unity Implicits. ACM Transactions on Graphics22, 3.
Abstract
We present a shape representation, the multi-level partition of unity implicit surface, that allows us to construct surface models from very large sets of points. There are three key ingredients to our approach: 1) piecewise quadratic functions that capture the local shape of the surface, 2) weighting functions (the partitions of unity) that blend together these local shape functions, and 3) an octree subdivision method that adapts to variations in the complexity of the local shape. Our approach gives us considerable flexibility in the choice of local shape functions, and in particular we can accurately represent sharp features such as edges and corners by selecting appropriate shape functions. An error-controlled subdivision leads to an adaptive approximation whose time and memory consumption depends on the required accuracy. Due to the separation of local approximation and local blending, the representation is not global and can be created and evaluated rapidly. Because our surfaces are described using implicit functions, operations such as shape blending, offsets, deformations and CSG are simple to perform.
Export
BibTeX
@article{Ohtake-et-al_ACM.Trans.Graph.03,
  TITLE     = {Multi-level Partition of Unity Implicits},
  AUTHOR    = {Ohtake, Yutaka and Belyaev, Alexander and Alexa, Marc and Turk, Greg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/882262.882293},
  LOCALID   = {Local-ID: C125675300671F7B-6F77EFE6032B9F9AC1256CF30069490C-sig03obats},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {We present a shape representation, the {\em multi-level partition of unity} implicit surface, that allows us to construct surface models from very large sets of points. There are three key ingredients to our approach: 1) piecewise quadratic functions that capture the local shape of the surface, 2) weighting functions (the partitions of unity) that blend together these local shape functions, and 3) an octree subdivision method that adapts to variations in the complexity of the local shape. Our approach gives us considerable flexibility in the choice of local shape functions, and in particular we can accurately represent sharp features such as edges and corners by selecting appropriate shape functions. An error-controlled subdivision leads to an adaptive approximation whose time and memory consumption depends on the required accuracy. Due to the separation of local approximation and local blending, the representation is not global and can be created and evaluated rapidly. Because our surfaces are described using implicit functions, operations such as shape blending, offsets, deformations and CSG are simple to perform.},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {22},
  NUMBER    = {3},
  PAGES     = {463--470},
}
Endnote
%0 Journal Article %A Ohtake, Yutaka %A Belyaev, Alexander %A Alexa, Marc %A Turk, Greg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-level Partition of Unity Implicits : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D7E-0 %F EDOC: 201993 %F OTHER: Local-ID: C125675300671F7B-6F77EFE6032B9F9AC1256CF30069490C-sig03obats %R 10.1145/882262.882293 %D 2003 %* Review method: peer-reviewed %X We present a shape representation, the {\em multi-level partition of unity}<br>implicit surface, that allows us to construct surface models from<br>very large sets of points. There are three key ingredients<br>to our approach: 1) piecewise quadratic functions that capture<br>the local shape of the surface, 2) weighting functions (the<br>partitions of unity) that blend together these local shape functions,<br>and 3) an octree subdivision method that adapts to variations in<br>the complexity of the local shape.<br><br>Our approach gives us considerable flexibility in the choice of local<br>shape functions, and in particular we can accurately represent sharp<br>features such as edges and corners by selecting appropriate shape<br>functions. An error-controlled subdivision leads to an adaptive approximation<br>whose time and memory consumption depends on the required accuracy.<br>Due to the separation of local approximation and local blending,<br>the representation is not global <br>and can be created and evaluated rapidly.Because our surfaces are<br>described using implicit functions, operations such as shape blending,<br>offsets, deformations and CSG are simple to perform. %J ACM Transactions on Graphics %V 22 %N 3 %& 463 %P 463 - 470 %I ACM %C New York, NY %@ false
Mertens, T., Kautz, J., Bekaert, P., Seidel, H.-P., and Van Reeth, F. 2003a. Efficient Rendering of Local Subsurface Scattering. Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003), IEEE.
Export
BibTeX
@inproceedings{Mertens-et-al_PG03,
  TITLE     = {Efficient Rendering of Local Subsurface Scattering},
  AUTHOR    = {Mertens, Tom and Kautz, Jan and Bekaert, Philippe and Seidel, Hans-Peter and Van Reeth, Frank},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2028-6},
  DOI       = {10.1109/PCCGA.2003.1238246},
  LOCALID   = {Local-ID: C125675300671F7B-77F7AC6F8CCFC818C1256E130016C210-Mertens:ERL:2003},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003)},
  EDITOR    = {Rokne, Jon and Klein, Reinhard and Wang, Wenping},
  PAGES     = {51--58},
  ADDRESS   = {Canmore, Canada},
}
Endnote
%0 Conference Proceedings %A Mertens, Tom %A Kautz, Jan %A Bekaert, Philippe %A Seidel, Hans-Peter %A Van Reeth, Frank %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Efficient Rendering of Local Subsurface Scattering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CD8-A %F EDOC: 201855 %F OTHER: Local-ID: C125675300671F7B-77F7AC6F8CCFC818C1256E130016C210-Mertens:ERL:2003 %R 10.1109/PCCGA.2003.1238246 %D 2003 %B 11th Pacific Conference on Computer Graphics and Applications %Z date of event: 2003-10-08 - 2003-10-10 %C Canmore, Canada %B Proceedings of the 11th Pacific Conference on Computer Graphics and Applications %E Rokne, Jon; Klein, Reinhard; Wang, Wenping %P 51 - 58 %I IEEE %@ 0-7695-2028-6
Mertens, T., Kautz, J., Bekaert, P., Seidel, H.-P., and Van Reeth, F. 2003b. Interactive Rendering of Translucent Deformable Objects. Rendering Techniques 2003 (EGWR 2003), The Eurographics Association.
Export
BibTeX
@inproceedings{Mertens-et-al_EGWR03,
  TITLE     = {Interactive Rendering of Translucent Deformable Objects},
  AUTHOR    = {Mertens, Tom and Kautz, Jan and Bekaert, Philippe and Seidel, Hans-Peter and Van Reeth, Frank},
  LANGUAGE  = {eng},
  ISBN      = {3-905673-03-7},
  DOI       = {10.2312/EGWR/EGWR03/130-140},
  LOCALID   = {Local-ID: C125675300671F7B-53B6734B1ED834FAC1256E130014ABC1-Mertens:IRT:2003},
  PUBLISHER = {The Eurographics Association},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Rendering Techniques 2003 (EGWR 2003)},
  EDITOR    = {Christensen, Per and Cohen-Or, Daniel},
  PAGES     = {130--140, 304},
  ADDRESS   = {Leuven, Belgium},
}
Endnote
%0 Conference Proceedings %A Mertens, Tom %A Kautz, Jan %A Bekaert, Philippe %A Seidel, Hans-Peter %A Van Reeth, Frank %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Interactive Rendering of Translucent Deformable Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D47-A %F EDOC: 201828 %F OTHER: Local-ID: C125675300671F7B-53B6734B1ED834FAC1256E130014ABC1-Mertens:IRT:2003 %R 10.2312/EGWR/EGWR03/130-140 %D 2003 %B 14th Eurographics Workshop on Rendering Techniques %Z date of event: 2003-06-25 - 2003-06-27 %C Leuven, Belgium %B Rendering Techniques 2003 %E Christensen, Per; Cohen-Or, Daniel %P 130 - 140, 304 %I The Eurographics Association %@ 3-905673-03-7
Mertens, T., Kautz, J., Bekaert, P., Seidel, H.-P., and Reeth, F.V. 2003c. Interactive Rendering of Translucent Deformable Objects. Proceedings of the SIGGRAPH 2003 Conference on Sketches and Applications, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/MertensKBSR03,
  TITLE     = {Interactive Rendering of Translucent Deformable Objects},
  AUTHOR    = {Mertens, Tom and Kautz, Jan and Bekaert, Philippe and Seidel, Hans-Peter and Van Reeth, Frank},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-7466-8},
  DOI       = {10.1145/965400.965409},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Proceedings of the SIGGRAPH 2003 Conference on Sketches and Applications},
  EDITOR    = {Rockwood, Alyn P.},
  PAGES     = {1--1},
  ADDRESS   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Mertens, Tom %A Kautz, Jan %A Bekaert, Philippe %A Seidel, Hans-Peter %A Reeth, Frank Van %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Interactive Rendering of Translucent Deformable Objects : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0C96-B %R 10.1145/965400.965409 %D 2003 %B ACM SIGGRAPH 2003 Conference on Sketches and Applications %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, CA, USA %B Proceedings of the SIGGRAPH 2003 Conference on Sketches and Applications %E Rockwood, Alyn P. %P 1 - 1 %I ACM %@ 978-1-4503-7466-8
Magnor, M. and Seidel, H.-P. 2003. Capturing the Shape of a Dynamic World - Fast ! Shape Modeling International 2003 (SMI 2003), IEEE.
Export
BibTeX
@inproceedings{Magnor-Seidel_SMI03,
  TITLE     = {Capturing the Shape of a Dynamic World -- Fast !},
  AUTHOR    = {Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1909-1},
  DOI       = {10.1109/SMI.2003.1199589},
  LOCALID   = {Local-ID: C125675300671F7B-E30286BF95739400C1256E21003A7794-Magnor03:CSD},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Shape Modeling International 2003 (SMI 2003)},
  EDITOR    = {Kim, Myung-Soo},
  PAGES     = {3--9},
  ADDRESS   = {Seoul, Korea},
}
Endnote
%0 Conference Proceedings %A Magnor, Marcus %A Seidel, Hans-Peter %+ Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Capturing the Shape of a Dynamic World - Fast ! : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C6E-B %F EDOC: 201908 %F OTHER: Local-ID: C125675300671F7B-E30286BF95739400C1256E21003A7794-Magnor03:CSD %R 10.1109/SMI.2003.1199589 %D 2003 %B 2003 International Conference on Shape Modeling %Z date of event: 2003-05-12 - 2003-05-16 %C Seoul, Korea %B Shape Modeling International 2003 %E Kim, Myung-Soo %P 3 - 9 %I IEEE %@ 0-7695-1909-1
Li, M., Magnor, M., and Seidel, H.-P. 2003a. Online Accelerated Rendering of Visual Hulls in Real Scenes. Journal of WSCG11.
Abstract
This paper presents an online system which is capable of reconstructing and rendering dynamic objects in real scenes. We reconstruct visual hulls of the objects by using a shape-from-silhouette approach. During rendering, a novel blending scheme is employed to compose multiple background images. Visibility artifacts on the dynamic object are removed by using opaque projective texture mapping. We also propose a dynamic texture packing technique to improve rendering performance by exploiting region-of-interest information. Our system takes multiple live or pre-recorded video streams as input. It produces realistic real-time rendering results of dynamic objects in their surrounding natural environment in which the user can freely navigate.
Export
BibTeX
@article{Ming:WSCG2003:OnlineRenderingVH,
  title    = {Online Accelerated Rendering of Visual Hulls in Real Scenes},
  author   = {Li, Ming and Magnor, Marcus and Seidel, Hans-Peter},
  language = {eng},
  issn     = {1213-6972},
  localid  = {Local-ID: C125675300671F7B-F966707B6258AAB2C1256C94003DE733-Ming:WSCG2003:OnlineRenderingVH},
  year     = {2003},
  date     = {2003},
  abstract = {This paper presents an online system which is capable of reconstructing and rendering dynamic objects in real scenes. We reconstruct visual hulls of the objects by using a shape-from-silhouette approach. During rendering, a novel blending scheme is employed to compose multiple background images. Visibility artifacts on the dynamic object are removed by using opaque projective texture mapping. We also propose a dynamic texture packing technique to improve rendering performance by exploiting region-of-interest information. Our system takes multiple live or pre-recorded video streams as input. It produces realistic real-time rendering results of dynamic objects in their surrounding natural environment in which the user can freely navigate.},
  journal  = {Journal of WSCG},
  volume   = {11},
  pages    = {290--297},
}
Endnote
%0 Journal Article %A Li, Ming %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Online Accelerated Rendering of Visual Hulls in Real Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D97-5 %F EDOC: 201826 %F OTHER: Local-ID: C125675300671F7B-F966707B6258AAB2C1256C94003DE733-Ming:WSCG2003:OnlineRenderingVH %D 2003 %* Review method: peer-reviewed %X This paper presents an online system which is capable of reconstructing and rendering dynamic objects in real scenes. We reconstruct visual hulls of the objects by using a shape-from-silhouette approach. During rendering, a novel blending scheme is employed to compose multiple background images. Visibility artifacts on the dynamic object are removed by using opaque projective texture mapping. We also propose a dynamic texture packing technique to improve rendering performance by exploiting region-of-interest information. Our system takes multiple live or pre-recorded video streams as input. It produces realistic real-time rendering results of dynamic objects in their surrounding natural environment in which the user can freely navigate. %J Journal of WSCG %V 11 %& 290 %P 290 - 297 %@ false
Li, M., Magnor, M., and Seidel, H.-P. 2003b. Improved Hardware-Accelerated Visual Hull Rendering. Vision, Modeling and Visualization 2003 (VMV 2003), Akademische Verlagsgesellschaft Aka.
Abstract
The visual hull is an efficient shape approximation for the purpose of reconstructing and visualizing dynamic objects. Recently, rapid progress in graphics hardware development has made it possible to render visual hulls from a set of silhouette images in real-time. In this paper we present several new algorithms to improve the generality and quality of hardware-accelerated visual hull rendering. First, a multi-pass approach employs texture objects and the stencil buffer to enable the visual hull rendering algorithm to deal with arbitrary numbers of input images. Secondly, flexible programmability of state-of-the-art graphics hardware is exploited to achieve smooth transitions between textures from different reference views projected onto visual hulls. In addition, visibility problems with projective texture mapping are solved by using the shadow mapping technique. We test our rendering algorithms on various off-the-shelf graphics cards and achieve real-time frame rates.
Export
BibTeX
@inproceedings{Li-et-al_VMV03,
  title     = {Improved Hardware-Accelerated Visual Hull Rendering},
  author    = {Li, Ming and Magnor, Marcus and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-89838-048-3},
  localid   = {Local-ID: C125675300671F7B-401A8A42CE75880EC1256DDD005330D9-Li:2003:ImprovedHAVH},
  publisher = {Akademische Verlagsgesellschaft Aka},
  year      = {2003},
  date      = {2003},
  abstract  = {The visual hull is an efficient shape approximation for the purpose of reconstructing and visualizing dynamic objects. Recently, rapid progress in graphics hardware development has made it possible to render visual hulls from a set of silhouette images in real-time. In this paper we present several new algorithms to improve the generality and quality of hardware-accelerated visual hull rendering. First, a multi-pass approach employs texture objects and the stencil buffer to enable the visual hull rendering algorithm to deal with arbitrary numbers of input images. Secondly, flexible programmability of state-of-the-art graphics hardware is exploited to achieve smooth transitions between textures from different reference views projected onto visual hulls. In addition, visibility problems with projective texture mapping are solved by using the shadow mapping technique. We test our rendering algorithms on various off-the-shelf graphics cards and achieve real-time frame rates.},
  booktitle = {Vision, Modeling and Visualization 2003 (VMV 2003)},
  editor    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter and Steinbach, Eckehard and Westermann, R{\"u}diger},
  pages     = {151--158},
  address   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Li, Ming %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Improved Hardware-Accelerated Visual Hull Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D3A-8 %F EDOC: 201804 %F OTHER: Local-ID: C125675300671F7B-401A8A42CE75880EC1256DDD005330D9-Li:2003:ImprovedHAVH %D 2003 %B International Fall Workshop on Vision, Modeling and Visualization 2003 %Z date of event: 2003-11-19 - 2003-11-21 %C Munich, Germany %X The visual hull is an efficient shape approximation for the purpose of <br>reconstructing and visualizing dynamic objects. Recently, rapid progress in <br>graphics hardware development has made it possible to render visual hulls from <br>a set of silhouette images in real-time. <br><br>In this paper we present several new algorithms to improve the generality and <br>quality of hardware-accelerated visual hull rendering. First, a multi-pass <br>approach employs texture objects and the stencil buffer to enable the visual <br>hull rendering algorithm to deal with arbitrary numbers of input images. <br>Secondly, flexible programmability of state-of-the-art graphics hardware is <br>exploited to achieve smooth transitions between textures from different <br>reference views projected onto visual hulls. In addition, visibility problems <br>with projective texture mapping are solved by using the shadow mapping <br>technique. We test our rendering algorithms on various off-the-shelf graphics <br>cards and achieve real-time frame rates. %B Vision, Modeling and Visualization 2003 %E Ertl, Thomas; Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter; Steinbach, Eckehard; Westermann, R&#252;diger %P 151 - 158 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-048-3
Li, M., Magnor, M., and Seidel, H.-P. 2003c. Hardware-accelerated Visual Hull Reconstruction and Rendering. Proceedings of Graphics Interface 2003 (GI 2003), A K Peters.
Abstract
We present a novel algorithm for simultaneous visual hull reconstruction and rendering by exploiting off-the-shelf graphics hardware. The reconstruction is accomplished by projective texture mapping in conjunction with alpha test. Parallel to the reconstruction, rendering is also carried out in the graphics pipeline. We texture the visual hull view-dependently with the aid of fragment shaders, such as nVIDIA's register combiners. Both reconstruction and rendering are done in a single rendering pass. We achieve frame rates of more than 80 fps on a standard PC equipped with a commodity graphics card. The performance is significantly faster than previously reported performances of similar systems.
Export
BibTeX
@inproceedings{Li-et-al_GI03,
  title     = {Hardware-accelerated Visual Hull Reconstruction and Rendering},
  author    = {Li, Ming and Magnor, Marcus and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {1-56881-207-8},
  doi       = {10.20380/GI2003.08},
  localid   = {Local-ID: C125675300671F7B-3C4A54BBF6083EA1C1256CD800353A88-Ming:GI03:HAVH},
  publisher = {A K Peters},
  year      = {2003},
  date      = {2003},
  abstract  = {We present a novel algorithm for simultaneous visual hull reconstruction and rendering by exploiting off-the-shelf graphics hardware. The reconstruction is accomplished by projective texture mapping in conjunction with alpha test. Parallel to the reconstruction, rendering is also carried out in the graphics pipeline. We texture the visual hull view-dependently with the aid of fragment shaders, such as nVIDIA's register combiners. Both reconstruction and rendering are done in a single rendering pass. We achieve frame rates of more than 80 fps on a standard PC equipped with a commodity graphics card. The performance is significantly faster than previously reported performances of similar systems.},
  booktitle = {Proceedings of Graphics Interface 2003 (GI 2003)},
  editor    = {M{\"o}ller, Torsten and Ware, Colin},
  pages     = {65--72},
  address   = {Halifax, Canada},
}
Endnote
%0 Conference Proceedings %A Li, Ming %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hardware-accelerated Visual Hull Reconstruction and Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D1C-A %F EDOC: 201917 %F OTHER: Local-ID: C125675300671F7B-3C4A54BBF6083EA1C1256CD800353A88-Ming:GI03:HAVH %R 10.20380/GI2003.08 %D 2003 %B Graphics Interface 2003 %Z date of event: 2003-06-11 - 2003-06-13 %C Halifax, Canada %X We present a novel algorithm for simultaneous visual hull reconstruction and <br>rendering by exploiting off-the-shelf graphics hardware. The reconstruction is <br>accomplished by projective texture mapping in conjunction with alpha test. <br>Parallel to the reconstruction, rendering is also carried out in the graphics <br>pipeline. We texture the visual hull view-dependently with the aid of fragment <br>shaders, such as nVIDIA's register combiners. Both reconstruction and rendering <br>are done in a single rendering pass. We achieve frame rates of more than 80 fps <br>on a standard PC equipped with a commodity graphics card. The performance is <br>significantly faster than previously reported performances of similar systems. %B Proceedings of Graphics Interface 2003 %E M&#246;ller, Torsten; Ware, Colin %P 65 - 72 %I A K Peters %@ 1-56881-207-8
Lensch, H.P.A., Kautz, J., Goesele, M., Lang, J., and Seidel, H.-P. 2003a. Virtualizing Real-world Objects. Proceedings of the Computer Graphics International (CGI 2003), IEEE Computer Society.
Export
BibTeX
@inproceedings{Lensch-et-al_CGI03,
  title     = {Virtualizing Real-world Objects},
  author    = {Lensch, Hendrik P. A. and Kautz, Jan and Goesele, Michael and Lang, Jochen and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-1946-6},
  doi       = {10.1109/CGI.2003.1214458},
  localid   = {Local-ID: C125675300671F7B-15DED5A900778BE8C1256E14005FC106-Lensch:2003:VRO},
  publisher = {IEEE Computer Society},
  year      = {2003},
  date      = {2003},
  booktitle = {Proceedings of the Computer Graphics International (CGI 2003)},
  pages     = {134--141},
  address   = {Tokyo, Japan},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Kautz, Jan %A Goesele, Michael %A Lang, Jochen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Virtualizing Real-world Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E87-0 %F EDOC: 201985 %F OTHER: Local-ID: C125675300671F7B-15DED5A900778BE8C1256E14005FC106-Lensch:2003:VRO %R 10.1109/CGI.2003.1214458 %D 2003 %B Computer Graphics International 2003 %Z date of event: 2003-07-09 - 2003-07-11 %C Tokyo, Japan %B Proceedings of the Computer Graphics International %P 134 - 141 %I IEEE Computer Society %@ 0-7695-1946-6
Lensch, H.P.A., Lang, J., Sá, A.M., and Seidel, H.-P. 2003b. Planned Sampling of Spatially Varying BRDFs. Computer Graphics Forum 22, 3.
Export
BibTeX
@article{Lensch-et-al_EUROGRAPHICS03,
  title     = {Planned Sampling of Spatially Varying {BRDFs}},
  author    = {Lensch, Hendrik P. A. and Lang, Jochen and S{\'a}, Asla M. and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/1467-8659.00695},
  localid   = {Local-ID: C125675300671F7B-CF0CF043F2C3CCF9C1256CFE0033246B-Lensch2003:PS},
  publisher = {Blackwell-Wiley},
  address   = {Oxford},
  year      = {2003},
  date      = {2003},
  journal   = {Computer Graphics Forum},
  volume    = {22},
  number    = {3},
  pages     = {473--482},
  booktitle = {EUROGRAPHICS 2003},
  editor    = {Brunet, Pere and Fellner, Dieter W.},
}
Endnote
%0 Journal Article %A Lensch, Hendrik P. A. %A Lang, Jochen %A S&#225;, Asla M. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Planned Sampling of Spatially Varying BRDFs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2DBD-F %F EDOC: 201833 %F OTHER: Local-ID: C125675300671F7B-CF0CF043F2C3CCF9C1256CFE0033246B-Lensch2003:PS %R 10.1111/1467-8659.00695 %D 2003 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 22 %N 3 %& 473 %P 473 - 482 %I Blackwell-Wiley %C Oxford %@ false %B EUROGRAPHICS 2003 %I Blackwell %C Oxford, Uk %@ false
Lensch, H.P.A., Kautz, J., Goesele, M., Heidrich, W., and Seidel, H.-P. 2003c. Image-Based Reconstruction of Spatial Appearance and Geometric Detail. ACM Transactions on Graphics 22, 2.
Abstract
Real-world objects are usually composed of a number of different materials that often show subtle changes even within a single material. Photorealistic rendering of such objects requires accurate measurements of the reflection properties of each material, as well as the spatially varying effects. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. Real-world objects often also have fine geometric detail that is not represented in an acquired mesh. To increase the detail, we derive normal maps even for non-Lambertian surfaces using our measured BRDFs. A high quality model of a real object can be generated with relatively little input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.
Export
BibTeX
@article{Lensch-et-al_ACM.Trans.Graph.03,
  title     = {Image-Based Reconstruction of Spatial Appearance and Geometric Detail},
  author    = {Lensch, Hendrik P. A. and Kautz, Jan and Goesele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/636886.636891},
  localid   = {Local-ID: C125675300671F7B-9DC8FDDC048DDFADC1256C5B002F0FA3-Lensch:IRS:2003},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2003},
  date      = {2003},
  abstract  = {Real-world objects are usually composed of a number of different materials that often show subtle changes even within a single material. Photorealistic rendering of such objects requires accurate measurements of the reflection properties of each material, as well as the spatially varying effects. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. Real-world objects often also have fine geometric detail that is not represented in an acquired mesh. To increase the detail, we derive normal maps even for non-Lambertian surfaces using our measured BRDFs. A high quality model of a real object can be generated with relatively little input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.},
  journal   = {ACM Transactions on Graphics},
  volume    = {22},
  number    = {2},
  pages     = {234--257},
}
Endnote
%0 Journal Article %A Lensch, Hendrik P. A. %A Kautz, Jan %A Goesele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image-Based Reconstruction of Spatial Appearance and Geometric Detail : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D2E-4 %F EDOC: 201970 %F OTHER: Local-ID: C125675300671F7B-9DC8FDDC048DDFADC1256C5B002F0FA3-Lensch:IRS:2003 %R 10.1145/636886.636891 %D 2003 %* Review method: peer-reviewed %X Real-world objects are usually composed of a number of different materials that <br>often show subtle changes even within a single material. Photorealistic <br>rendering of such objects requires accurate measurements of the reflection <br>properties of each material, as well as the spatially varying effects. We <br>present an image-based measuring method that robustly detects the different <br>materials of real objects and fits an average bidirectional reflectance <br>distribution function (BRDF) to each of them. In order to model local changes <br>as well, we project the measured data for each surface point into a basis <br>formed by the recovered BRDFs leading to a truly spatially varying BRDF <br>representation. Real-world objects often also have fine geometric detail that <br>is not represented in an acquired mesh. To increase the detail, we derive <br>normal maps even for non-Lambertian surfaces using our measured BRDFs. A high <br>quality model of a real object can be generated with relatively little input <br>data. The generated model allows for rendering under arbitrary viewing and <br>lighting conditions and realistically reproduces the appearance of the original <br>object. 
%J ACM Transactions on Graphics %V 22 %N 2 %& 234 %P 234 - 257 %I ACM %C New York, NY %@ false
Lensch, H., Goesele, M., Bekaert, P., et al. 2003d. Interactive Rendering of Translucent Objects. Computer Graphics Forum 22, 2.
Abstract
This paper presents a rendering method for translucent objects, in which viewpoint and illumination can be modified at interactive rates. In a preprocessing step, the impulse response to incoming light impinging at each surface point is computed and stored in two different ways: The local effect on close-by surface points is modeled as a per-texel filter kernel that is applied to a texture map representing the incident illumination. The global response (i.e. light shining through the object) is stored as vertex-to-vertex throughput factors for the triangle mesh of the object. During rendering, the illumination map for the object is computed according to the current lighting situation and then filtered by the precomputed kernels. The illumination map is also used to derive the incident illumination on the vertices which is distributed via the vertex-to-vertex throughput factors to the other vertices. The final image is obtained by combining the local and global response. We demonstrate the performance of our method for several models.
Export
BibTeX
@article{Lensch-et-al_Comp.Graph.Forum.03,
  title     = {Interactive Rendering of Translucent Objects},
  author    = {Lensch, Hendrik and Goesele, Michael and Bekaert, Philippe and Kautz, Jan and Magnor, Marcus and Lang, Jochen and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/1467-8659.00660},
  localid   = {Local-ID: C1256BDE005F57A8-B6E1057E8746BD09C1256E14003D6492-Lensch:2003:IRT},
  publisher = {Blackwell-Wiley},
  address   = {Oxford},
  year      = {2003},
  date      = {2003},
  abstract  = {This paper presents a rendering method for translucent objects, in which viewpoint and illumination can be modified at interactive rates. In a preprocessing step, the impulse response to incoming light impinging at each surface point is computed and stored in two different ways: The local effect on close-by surface points is modeled as a per-texel filter kernel that is applied to a texture map representing the incident illumination. The global response (i.e. light shining through the object) is stored as vertex-to-vertex throughput factors for the triangle mesh of the object. During rendering, the illumination map for the object is computed according to the current lighting situation and then filtered by the precomputed kernels. The illumination map is also used to derive the incident illumination on the vertices which is distributed via the vertex-to-vertex throughput factors to the other vertices. The final image is obtained by combining the local and global response. We demonstrate the performance of our method for several models.},
  journal   = {Computer Graphics Forum},
  volume    = {22},
  number    = {2},
  pages     = {195--205},
}
Endnote
%0 Journal Article %A Lensch, Hendrik %A Goesele, Michael %A Bekaert, Philippe %A Kautz, Jan %A Magnor, Marcus %A Lang, Jochen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Rendering of Translucent Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D4A-4 %F EDOC: 201805 %F OTHER: Local-ID: C1256BDE005F57A8-B6E1057E8746BD09C1256E14003D6492-Lensch:2003:IRT %R 10.1111/1467-8659.00660 %D 2003 %* Review method: peer-reviewed %X This paper presents a rendering method for translucent objects, in which <br>viewpoint and illumination can be<br>modified at interactive rates. In a preprocessing step, the impulse response to <br>incoming light impinging at each<br>surface point is computed and stored in two different ways: The local effect on <br>close-by surface points is modeled<br>as a per-texel filter kernel that is applied to a texture map representing the <br>incident illumination. The global<br>response (i.e. light shining through the object) is stored as vertex-to-vertex <br>throughput factors for the triangle<br>mesh of the object. During rendering, the illumination map for the object is <br>computed according to the current<br>lighting situation and then filtered by the precomputed kernels. The <br>illumination map is also used to derive the<br>incident illumination on the vertices which is distributed via the <br>vertex-to-vertex throughput factors to the other<br>vertices. The final image is obtained by combining the local and global <br>response. We demonstrate the performance<br>of our method for several models. 
%J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 22 %N 2 %& 195 %P 195 - 205 %I Blackwell-Wiley %C Oxford %@ false
Lang, J., Seidel, H.-P., and Lensch, H.P.A. 2003a. View Planning for BRDF Acquisition. Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/LangSL03,
  title     = {View Planning for {BRDF} Acquisition},
  author    = {Lang, Jochen and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  language  = {eng},
  isbn      = {978-1-4503-7466-8},
  doi       = {10.1145/965400.965407},
  publisher = {ACM},
  year      = {2003},
  date      = {2003},
  booktitle = {Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications},
  editor    = {Rockwood, Alyn P.},
  pages     = {1--1},
  address   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Lang, Jochen %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T View Planning for BRDF Acquisition : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0C9A-7 %R 10.1145/965400.965407 %D 2003 %B ACM SIGGRAPH 2003 Conference on Sketches and Applications %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, CA, USA %B Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications %E Rockwood, Alyn P. %P 1 - 1 %I ACM %@ 978-1-4503-7466-8
Lang, J., Seidel, H.-P., and Pai, D.K. 2003b. Scanning Large-Scale Articulated Deformations. Proceedings of Graphics Interface 2003 (GI 2003), Canadian Human-Computer Communications Society.
Export
BibTeX
@inproceedings{Lang-et-al_GI03,
  title     = {Scanning Large-Scale Articulated Deformations},
  author    = {Lang, Jochen and Seidel, Hans-Peter and Pai, Dinesh K.},
  language  = {eng},
  isbn      = {1-56881-207-8},
  doi       = {10.20380/GI2003.31},
  localid   = {Local-ID: C125675300671F7B-5339CA45B1488FFAC1256CE90037EA35-LangEtal2003a},
  publisher = {Canadian Human-Computer Communications Society},
  year      = {2003},
  date      = {2003},
  booktitle = {Proceedings of Graphics Interface 2003 (GI 2003)},
  editor    = {M{\"o}ller, Torsten and Ware, Colin},
  pages     = {265--272},
  address   = {Halifax, Canada},
}
Endnote
%0 Conference Proceedings %A Lang, Jochen %A Seidel, Hans-Peter %A Pai, Dinesh K. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Scanning Large-Scale Articulated Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2DFB-3 %F EDOC: 201994 %F OTHER: Local-ID: C125675300671F7B-5339CA45B1488FFAC1256CE90037EA35-LangEtal2003a %R 10.20380/GI2003.31 %D 2003 %B Graphics Interface 2003 %Z date of event: 2003-06-11 - 2003-06-13 %C Halifax, Canada %B Proceedings of Graphics Interface 2003 %E M&#246;ller, Torsten; Ware, Colin %P 265 - 272 %I Canadian Human-Computer Communications Society %@ 1-56881-207-8 %U https://graphicsinterface.org/wp-content/uploads/gi2003-31.pdf
Kautz, J., Lensch, H.P.A., Gösele, M., Lang, J., and Seidel, H.-P. 2003. Modeling the World: The Virtualization Pipeline. 12th International Conference on Image Analysis and Processing, 2003, Proceedings, IEEE explore.
Export
BibTeX
@inproceedings{DBLP:conf/iciap/KautzLGLS03,
  title     = {Modeling the World: The Virtualization Pipeline},
  author    = {Kautz, Jan and Lensch, Hendrik P. A. and G{\"o}sele, Michael and Lang, Jochen and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-1948-2},
  doi       = {10.1109/ICIAP.2003.1234044},
  publisher = {IEEE explore},
  year      = {2003},
  date      = {2003},
  booktitle = {12th International Conference on Image Analysis and Processing, 2003, Proceedings},
  pages     = {166--174},
  address   = {Mantova, Italy},
}
Endnote
%0 Conference Proceedings %A Kautz, Jan %A Lensch, Hendrik P. A. %A G&#246;sele, Michael %A Lang, Jochen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling the World: The Virtualization Pipeline : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0F12-D %R 10.1109/ICIAP.2003.1234044 %D 2003 %B 12th International Conference on Image Analysis and Processing %Z date of event: 2003-09-17 - 2003-09-19 %C Mantova, Italy %B 12th International Conference on Image Analysis and Processing, 2003, Proceedings %P 166 - 174 %I IEEE explore %@ 0-7695-1948-2
Kähler, K., Haber, J., and Seidel, H.-P. 2003a. Reanimating the Dead: Reconstruction of Expressive Faces from Skull Data. ACM Transactions on Graphics (Proc. SIGGRAPH 2003), ACM.
Abstract
Facial reconstruction for postmortem identification of humans from their skeletal remains is a challenging and fascinating part of forensic art. The former look of a face can be approximated by predicting and modeling the layers of tissue on the skull. This work is as of today carried out solely by physical sculpting with clay, where experienced artists invest up to hundreds of hours to craft a reconstructed face model. Remarkably, one of the most popular tissue reconstruction methods bears many resemblances with surface fitting techniques used in computer graphics, thus suggesting the possibility of a transfer of the manual approach to the computer. In this paper, we present a facial reconstruction approach that fits an anatomy-based virtual head model, incorporating skin and muscles, to a scanned skull using statistical data on skull / tissue relationships. The approach has many advantages over the traditional process: a reconstruction can be completed in about an hour from acquired skull data; also, variations such as a slender or a more obese build of the modeled individual are easily created. Last not least, by matching not only skin geometry but also virtual muscle layers, an animatable head model is generated that can be used to form facial expressions beyond the neutral face typically used in physical reconstructions.
Export
BibTeX
@comment{review: removed duplicated PUBLISHER field; stripped HTML <br> artifacts from ABSTRACT; rejoined the entry, which was split across two lines}
@inproceedings{Kahler-et-al_SIGGRAPH03,
  TITLE     = {Reanimating the Dead: Reconstruction of Expressive Faces from Skull Data},
  AUTHOR    = {K{\"a}hler, Kolja and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/882262.882307},
  LOCALID   = {Local-ID: C125675300671F7B-53870ED9C6CEF425C1256CF3002C2808-Kaehler:2003:RD},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Facial reconstruction for postmortem identification of humans from their skeletal remains is a challenging and fascinating part of forensic art. The former look of a face can be approximated by predicting and modeling the layers of tissue on the skull. This work is as of today carried out solely by physical sculpting with clay, where experienced artists invest up to hundreds of hours to craft a reconstructed face model. Remarkably, one of the most popular tissue reconstruction methods bears many resemblances with surface fitting techniques used in computer graphics, thus suggesting the possibility of a transfer of the manual approach to the computer. In this paper, we present a facial reconstruction approach that fits an anatomy-based virtual head model, incorporating skin and muscles, to a scanned skull using statistical data on skull / tissue relationships. The approach has many advantages over the traditional process: a reconstruction can be completed in about an hour from acquired skull data; also, variations such as a slender or a more obese build of the modeled individual are easily created. Last not least, by matching not only skin geometry but also virtual muscle layers, an animatable head model is generated that can be used to form facial expressions beyond the neutral face typically used in physical reconstructions.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2003},
  EDITOR    = {Hodgins, Jessica K.},
  PAGES     = {554--561},
  JOURNAL   = {ACM Transactions on Graphics (Proc. SIGGRAPH)},
  VOLUME    = {22},
  ISSUE     = {3},
  ADDRESS   = {San Diego, USA},
}
Endnote
%0 Conference Proceedings %A K&#228;hler, Kolja %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Reanimating the Dead: Reconstruction of Expressive Faces from Skull Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2DEE-1 %F EDOC: 201845 %F OTHER: Local-ID: C125675300671F7B-53870ED9C6CEF425C1256CF3002C2808-Kaehler:2003:RD %R 10.1145/882262.882307 %D 2003 %B ACM SIGGRAPH 2003 %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, USA %X Facial reconstruction for postmortem identification of humans from<br> their skeletal remains is a challenging and fascinating part of<br> forensic art. The former look of a face can be approximated by<br> predicting and modeling the layers of tissue on the skull.<br> This work is as of today carried out solely by physical sculpting<br> with clay, where experienced artists invest up to hundreds of hours<br> to craft a reconstructed face model. Remarkably, one of the most<br> popular tissue reconstruction methods bears many resemblances with<br> surface fitting techniques used in computer graphics, thus<br> suggesting the possibility of a transfer of the manual approach to<br> the computer. In this paper, we present a facial reconstruction<br> approach that fits an anatomy-based virtual head model,<br> incorporating skin and muscles, to a scanned skull using<br> statistical data on skull / tissue relationships. The approach has<br> many advantages over the traditional process: a reconstruction can<br> be completed in about an hour from acquired skull data; also,<br> variations such as a slender or a more obese build of the modeled<br> individual are easily created. 
Last not least, by matching not only<br> skin geometry but also virtual muscle layers, an animatable head<br> model is generated that can be used to form facial expressions<br> beyond the neutral face typically used in physical<br> reconstructions. %B Proceedings of ACM SIGGRAPH 2003 %E Hodgins, Jessica K. %P 554 - 561 %I ACM %J ACM Transactions on Graphics %V 22 %N 3 %I ACM %@ false
Kähler, K., Haber, J., and Seidel, H.-P. 2003b. Dynamically Refining Animated Triangle Meshes for Rendering. The Visual Computer19.
Abstract
We present a method to dynamically apply local refinements to an irregular triangle mesh as it deforms in real-time. The method increases surface smoothness in regions of high deformation by splitting triangles in a fashion similar to one or two steps of Loop subdivision. The refinement is computed for an arbitrary triangle mesh and the subdivided triangles are simply passed to the rendering engine, leaving the mesh itself unchanged. The algorithm can thus be easily plugged into existing systems to enhance visual appearance of animated meshes. The refinement step has very low computational overhead and is easy to implement. We demonstrate the use of the algorithm in our physics-based facial animation system.
Export
BibTeX
@article{Kahler-et-al_Vis-Comp.03, TITLE = {Dynamically Refining Animated Triangle Meshes for Rendering}, AUTHOR = {K{\"a}hler, Kolja and Haber, J{\"o}rg and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0178-2789}, DOI = {10.1007/s00371-002-0185-8}, LOCALID = {Local-ID: C125675300671F7B-D47864863E19724BC1256C8300580D0D-Kaehler:DRTVC:2001}, PUBLISHER = {Springer International}, ADDRESS = {Berlin}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {We present a method to dynamically apply local refinements to<br>an irregular triangle mesh<br>as it deforms in real-time. The method increases surface smoothness<br>in regions of high deformation by splitting triangles in a fashion<br>similar to one or two steps of Loop subdivision. The refinement is<br>computed for an arbitrary triangle mesh and the subdivided triangles<br>are simply passed to the rendering engine, leaving the mesh itself<br>unchanged. The algorithm can thus be easily plugged into existing<br>systems to enhance visual appearance of animated meshes. The<br>refinement step has very low computational overhead and is easy to<br>implement. We demonstrate the use of the algorithm in our<br>physics-based facial animation system.}, JOURNAL = {The Visual Computer}, VOLUME = {19}, PAGES = {310--318}, }
Endnote
%0 Journal Article %A K&#228;hler, Kolja %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamically Refining Animated Triangle Meshes for Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CC0-D %F EDOC: 202016 %F OTHER: Local-ID: C125675300671F7B-D47864863E19724BC1256C8300580D0D-Kaehler:DRTVC:2001 %R 10.1007/s00371-002-0185-8 %D 2003 %* Review method: peer-reviewed %X We present a method to dynamically apply local refinements to<br>an irregular triangle mesh<br>as it deforms in real-time. The method increases surface smoothness<br>in regions of high deformation by splitting triangles in a fashion<br>similar to one or two steps of Loop subdivision. The refinement is<br>computed for an arbitrary triangle mesh and the subdivided triangles<br>are simply passed to the rendering engine, leaving the mesh itself<br>unchanged. The algorithm can thus be easily plugged into existing<br>systems to enhance visual appearance of animated meshes. The<br>refinement step has very low computational overhead and is easy to<br>implement. We demonstrate the use of the algorithm in our<br>physics-based facial animation system. %J The Visual Computer %V 19 %& 310 %P 310 - 318 %I Springer International %C Berlin %@ false %U https://rdcu.be/dB51Y
Jeong, W.-K., Ivrissimtzis, I., and Seidel, H.-P. 2003. Neural Meshes: Statistical Learning based on Normals. Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003), IEEE.
Export
BibTeX
@inproceedings{Jeong-et-al_PG03, TITLE = {Neural Meshes: Statistical Learning based on Normals}, AUTHOR = {Jeong, Won-Ki and Ivrissimtzis, Ioannis and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2028-6}, DOI = {10.1109/PCCGA.2003.1238284}, LOCALID = {Local-ID: C125675300671F7B-FB34C351DED7A5D8C1256E0B00697AC2-jis2003}, PUBLISHER = {IEEE}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003)}, EDITOR = {Rokne, Jon and Klein, Reinhard and Wang, Wenping}, PAGES = {404--408}, ADDRESS = {Canmore, Alberta, Canada}, }
Endnote
%0 Conference Proceedings %A Jeong, Won-Ki %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Meshes: Statistical Learning based on Normals : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D86-B %F EDOC: 201816 %F OTHER: Local-ID: C125675300671F7B-FB34C351DED7A5D8C1256E0B00697AC2-jis2003 %R 10.1109/PCCGA.2003.1238284 %D 2003 %B 11th Pacific Conference on Computer Graphics and Applications %Z date of event: 2003-10-08 - 2003-10-10 %C Canmore, Alberta, Canada %B Proceedings of the 11th Pacific Conference on Computer Graphics and Applications %E Rokne, Jon; Klein, Reinhard; Wang, Wenping %P 404 - 408 %I IEEE %@ 0-7695-2028-6
Ivrissimtzis, I., Rössl, C., and Seidel, H.-P. 2003a. Tree-based Data Structures for Triangle Mesh Connectivity Encoding. In: G. Brunnett, B. Hamann and H. Müller, eds., Geometric Modeling for Scientific Visualization. Springer, Heidelberg, Germany.
Export
BibTeX
@incollection{IRS:2003:TBDS, TITLE = {Tree-based Data Structures for Triangle Mesh Connectivity Encoding}, AUTHOR = {Ivrissimtzis, Ioannis and R{\"o}ssl, Christian and Seidel, Hans-Peter}, EDITOR = {Brunnett, Guido and Hamann, Bernd and M{\"u}ller, Heinrich}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-0E3462E2C69E959BC1256D09002F57F0-IRS:2003:TBDS}, PUBLISHER = {Springer}, ADDRESS = {Heidelberg, Germany}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Geometric Modeling for Scientific Visualization}, DEBUG = {editor: Brunnett, Guido; editor: Hamann, Bernd; editor: M{\"u}ller, Heinrich}, PAGES = {171--187}, }
Endnote
%0 Book Section %A Ivrissimtzis, Ioannis %A R&#246;ssl, Christian %A Seidel, Hans-Peter %E Brunnett, Guido %E Hamann, Bernd %E M&#252;ller, Heinrich %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Tree-based Data Structures for Triangle Mesh Connectivity Encoding : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E71-2 %F EDOC: 201950 %F OTHER: Local-ID: C125675300671F7B-0E3462E2C69E959BC1256D09002F57F0-IRS:2003:TBDS %I Springer %C Heidelberg, Germany %D 2003 %B Geometric Modeling for Scientific Visualization %E Brunnett, Guido; Hamann, Bernd; M&#252;ller, Heinrich %P 171 - 187 %I Springer %C Heidelberg, Germany
Ivrissimtzis, I., Jeong, W.-K., and Seidel, H.-P. 2003b. Using Growing Cell Structures for Surface Reconstruction. Shape Modeling International 2003 (SMI 2003), IEEE.
Export
BibTeX
@inproceedings{Ivrissimtzis-et-al_SMI03, TITLE = {Using Growing Cell Structures for Surface Reconstruction}, AUTHOR = {Ivrissimtzis, Ioannis and Jeong, Won-Ki and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-1909-1}, DOI = {10.1109/SMI.2003.1199604}, LOCALID = {Local-ID: C125675300671F7B-DA58732949776FFDC1256CEF005A24FE-ijs03}, PUBLISHER = {IEEE}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Shape Modeling International 2003 (SMI 2003)}, EDITOR = {Kim, Myung-Soo}, PAGES = {78--86}, ADDRESS = {Seoul, Korea}, }
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Jeong, Won-Ki %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Growing Cell Structures for Surface Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E81-B %F EDOC: 201968 %F OTHER: Local-ID: C125675300671F7B-DA58732949776FFDC1256CEF005A24FE-ijs03 %R 10.1109/SMI.2003.1199604 %D 2003 %B 2003 International Conference on Shape Modeling %Z date of event: 2003-05-12 - 2003-05-16 %C Seoul, Korea %B Shape Modeling International 2003 %E Kim, Myung-Soo %P 78 - 86 %I IEEE %@ 0-7695-1909-1
Ivrissimtzis, I. and Seidel, H.-P. 2003. Combinatorial Properties of Subdivision Meshes. Mathematics of Surfaces (IMA 2003), Springer.
Export
BibTeX
@inproceedings{Ivrissimtzis-Seidel_IMA03, TITLE = {Combinatorial Properties of Subdivision Meshes}, AUTHOR = {Ivrissimtzis, Ioannis and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0302-9743}, ISBN = {978-3-540-20053-6}, DOI = {10.1007/978-3-540-39422-8_6}, LOCALID = {Local-ID: C125675300671F7B-6C619684F52FB535C1256CF4004CB9E1-is03}, PUBLISHER = {Springer}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Mathematics of Surfaces (IMA 2003)}, EDITOR = {Wilson, Michael J. and Martin, Ralph R.}, PAGES = {73--84}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {2768}, ADDRESS = {Leeds, UK}, }
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combinatorial Properties of Subdivision Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C87-2 %F EDOC: 201964 %F OTHER: Local-ID: C125675300671F7B-6C619684F52FB535C1256CF4004CB9E1-is03 %R 10.1007/978-3-540-39422-8_6 %D 2003 %B 10th IMA International Conference %Z date of event: 2003-09-15 - 2003-09-17 %C Leeds, UK %B Mathematics of Surfaces %E Wilson, Michael J.; Martin, Ralph R. %P 73 - 84 %I Springer %@ 978-3-540-20053-6 %B Lecture Notes in Computer Science %N 2768 %@ false %U https://rdcu.be/dBYEw
Ivrissimtzis, I., Shrivastava, K., and Seidel, H.-P. 2003c. Subdivision Rules for General Meshes. Curve and Surface Fitting: Saint-Malo 2002, Nashboro Press.
Export
BibTeX
@inproceedings{iss03, TITLE = {Subdivision Rules for General Meshes}, AUTHOR = {Ivrissimtzis, Ioannis and Shrivastava, Kanishka and Seidel, Hans-Peter}, EDITOR = {Cohen, Albert and Merrien, Jean-Louis and Schumaker, Larry L.}, LANGUAGE = {eng}, ISBN = {0-9728482-1-5}, LOCALID = {Local-ID: C125675300671F7B-A478D3CFC7926A43C1256CEF005BAE87-iss03}, PUBLISHER = {Nashboro Press}, YEAR = {2002}, DATE = {2003}, BOOKTITLE = {Curve and Surface Fitting: Saint-Malo 2002}, PAGES = {229--238}, SERIES = {Proceedings of the 5th Conference on Curves and Surfaces}, ADDRESS = {St Malo, France}, }
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Shrivastava, Kanishka %A Seidel, Hans-Peter %E Cohen, Albert %E Merrien, Jean-Louis %E Schumaker, Larry L. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Subdivision Rules for General Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E3B-D %F EDOC: 201848 %F OTHER: Local-ID: C125675300671F7B-A478D3CFC7926A43C1256CEF005BAE87-iss03 %D 2003 %B Curve and Surface Fitting 2002 %Z date of event: 2002-06-27 - %C St Malo, France %B Curve and Surface Fitting: Saint-Malo 2002 %P 229 - 238 %I Nashboro Press %@ 0-9728482-1-5 %B Proceedings of the 5th Conference on Curves and Surfaces
Havran, V., Bittner, J., and Seidel, H.-P. 2003a. Exploiting Temporal Coherence in Ray Casted Walkthroughs. Proceedings of the 19th Spring Conference on Computer Graphics (SCCG 2003), ACM.
Abstract
We present a technique that aims at exploiting temporal coherence of ray casted walkthroughs. Our goal is to reuse ray/object intersections computed in the last frame of the walkthrough for acceleration of ray casting in the current frame. In particular we aim at eliminating the ray traversal and computing only a single ray/object intersection per pixel. If our technique does not succeed in determining visibility, it falls back to the classical ray traversal. Visible point samples from the last frame are reprojected to the current frame. To identify whether these samples can be reused we apply splatting and epipolar geometry constraints. We discuss two additional techniques that handle correct appearance of small objects. We conducted a series of tests on walkthroughs of building interiors. Our method succeeded in determining visibility of more than 78% of pixels. For these pixels only a single ray/object intersection is executed. The frame rate is increased by up to 47%. Finally, we argue that the achieved speedup is relatively significant by comparing the performance of our algorithm to the "ideal" ray shooting algorithm.
Export
BibTeX
@inproceedings{Havran-et-al_SCCG03, TITLE = {Exploiting Temporal Coherence in Ray Casted Walkthroughs}, AUTHOR = {Havran, Vlastimil and Bittner, Jiri and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-58113-861-0}, DOI = {10.1145/984952.984977}, LOCALID = {Local-ID: C125675300671F7B-8FBB3C9E989B6A44C1256CFE007D9073-Havran2003:SCCG}, PUBLISHER = {ACM}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {We present a technique that aims at exploiting temporal coherence of ray casted <br>walkthroughs. Our goal is to reuse ray/object<br>intersections computed in the last frame of the walkthrough for<br>acceleration of ray casting in the current frame. In particular we aim at <br>eliminating the ray traversal and computing only a single ray/object <br>intersection per pixel. If our technique does not succeed in determining <br>visibility, it falls back to the classical ray traversal. Visible point samples <br>from the last frame are reprojected to the current frame. To identify whether <br>these samples can be reused we apply splatting and epipolar geometry <br>constraints. We discuss two additional techniques that handle correct <br>appearance of small objects. We conducted a series of tests on walkthroughs of <br>building interiors. Our method succeeded in determining visibility of more than <br>78\% of pixels. For these pixels only a single ray/object intersection is <br>executed. The frame rate is increased by up to 47\%. Finally, we<br>argue that the achieved speedup is relatively significant by comparing the <br>performance of our algorithm to the ``ideal'' ray shooting algorithm.}, BOOKTITLE = {Proceedings of the 19th Spring Conference on Computer Graphics (SCCG 2003)}, EDITOR = {Joy, Kenneth I.}, PAGES = {149--155}, ADDRESS = {Budmerice, Slovakia}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Bittner, Jiri %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Temporal Coherence in Ray Casted Walkthroughs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CF1-0 %F EDOC: 201819 %F OTHER: Local-ID: C125675300671F7B-8FBB3C9E989B6A44C1256CFE007D9073-Havran2003:SCCG %R 10.1145/984952.984977 %D 2003 %B 19th Spring Conference on Computer Graphics %Z date of event: 2003-04-24 - 2003-04-26 %C Budmerice, Slovakia %X We present a technique that aims at exploiting temporal coherence of ray casted <br>walkthroughs. Our goal is to reuse ray/object<br>intersections computed in the last frame of the walkthrough for<br>acceleration of ray casting in the current frame. In particular we aim at <br>eliminating the ray traversal and computing only a single ray/object <br>intersection per pixel. If our technique does not succeed in determining <br>visibility, it falls back to the classical ray traversal. Visible point samples <br>from the last frame are reprojected to the current frame. To identify whether <br>these samples can be reused we apply splatting and epipolar geometry <br>constraints. We discuss two additional techniques that handle correct <br>appearance of small objects. We conducted a series of tests on walkthroughs of <br>building interiors. Our method succeeded in determining visibility of more than <br>78\% of pixels. For these pixels only a single ray/object intersection is <br>executed. The frame rate is increased by up to 47\%. Finally, we<br>argue that the achieved speedup is relatively significant by comparing the <br>performance of our algorithm to the ``ideal'' ray shooting algorithm. %B Proceedings of the 19th Spring Conference on Computer Graphics %E Joy, Kenneth I. %P 149 - 155 %I ACM %@ 978-1-58113-861-0
Havran, V., Damez, C., Myszkowski, K., and Seidel, H.-P. 2003b. An Efficient Spatio-temporal Architecture for Animation Rendering. Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/HavranDMS03, TITLE = {An Efficient Spatio-temporal Architecture for Animation Rendering}, AUTHOR = {Havran, Vlastimil and Damez, Cyrille and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7466-8}, DOI = {10.1145/965400.965402}, PUBLISHER = {ACM}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications}, EDITOR = {Rockwood, Alyn P.}, PAGES = {1--1}, ADDRESS = {San Diego}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Damez, Cyrille %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Spatio-temporal Architecture for Animation Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0CA2-D %R 10.1145/965400.965402 %D 2003 %B ACM SIGGRAPH 2003 Conference on Sketches and Applications %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego %B Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications %E Rockwood, Alyn P. %P 1 - 1 %I ACM %@ 978-1-4503-7466-8
Havran, V., Dmitriev, K.A., and Seidel, H.-P. 2003c. Goniometric Diagram Mapping for Hemisphere. Eurographics 2003 - Short Presentations, The Eurographics Association.
Export
BibTeX
@inproceedings{DBLP:conf/eurographics/HavranDS03, TITLE = {Goniometric Diagram Mapping for Hemisphere}, AUTHOR = {Havran, Vlastimil and Dmitriev, Kirill Alexandrovich and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1017-4656}, DOI = {10.2312/EGS.20031042}, PUBLISHER = {The Eurographics Association}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Eurographics 2003 -- Short Presentations}, EDITOR = {Chover, Miguel and Hagen, Hans and Tost, Daniela}, PAGES = {1--8}, ADDRESS = {Granada, Spain}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Dmitriev, Kirill Alexandrovich %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Goniometric Diagram Mapping for Hemisphere : %G eng %U http://hdl.handle.net/21.11116/0000-000F-1036-2 %R 10.2312/EGS.20031042 %D 2003 %B 24th Annual Conference of the European Association for Computer Graphics %Z date of event: 2003-09-01 - 2003-09-05 %C Granada, Spain %B Eurographics 2003 - Short Presentations %E Chover, Miguel; Hagen, Hans; Tost, Daniela %P 1 - 8 %I The Eurographics Association %@ false
Havran, V., Damez, C., Myszkowski, K., and Seidel, H.-P. 2003d. An Efficient Spatio-temporal Architecture for Animation Rendering. Rendering Techniques 2003 (EGWR 2003), The Eurographics Association.
Abstract
Producing high quality animations featuring rich object appearance and compelling lighting effects is very time consuming using traditional frame-by-frame rendering systems. In this paper we present a rendering architecture for computing multiple frames at once by exploiting the coherence between image samples in the temporal domain. For each sample representing a given point in the scene we update its view-dependent components for each frame and add its contribution to pixels identified through the compensation of camera and object motion. This leads naturally to a high quality motion blur and significantly reduces the cost of illumination computations. The required visibility information is provided using a custom ray tracing acceleration data structure for multiple frames simultaneously. We demonstrate that precise and costly global illumination techniques such as bidirectional path tracing become affordable in this rendering architecture.
Export
BibTeX
@inproceedings{Havran-et-al_EGWR03, TITLE = {An Efficient Spatio-temporal Architecture for Animation Rendering}, AUTHOR = {Havran, Vlastimil and Damez, Cyrille and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {1-58113-754-0}, DOI = {10.2312/EGWR/EGWR03/106-117}, LOCALID = {Local-ID: C125675300671F7B-375DE41ADBC27783C1256D2500414C13-Havran2003:EGSR}, PUBLISHER = {The Eurographics Association}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {Producing high quality animations featuring rich object appearance and <br>compelling lighting effects is very time consuming using traditional <br>frame-by-frame rendering systems. In this paper we present a rendering <br>architecture for computing multiple frames at once by exploiting the coherence <br>between image samples in the temporal domain. For each sample representing a <br>given point in the scene we update its view-dependent components for each frame<br>and add its contribution to pixels identified through the compensation of <br>camera and object motion. This leads naturally to a high quality motion blur <br>and significantly reduces the cost of illumination computations. The required <br>visibility information is provided using a custom ray tracing acceleration data <br>structure for multiple frames simultaneously. We demonstrate that precise<br>and costly global illumination techniques such as bidirectional path tracing <br>become affordable in this rendering architecture.}, BOOKTITLE = {Rendering Techniques 2003 (EGWR 2003)}, EDITOR = {Christensen, Per and Cohen-Or, Daniel}, PAGES = {106--117, 303}, ADDRESS = {Leuven, Belgium}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Damez, Cyrille %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Spatio-temporal Architecture for Animation Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C20-6 %F EDOC: 201824 %F OTHER: Local-ID: C125675300671F7B-375DE41ADBC27783C1256D2500414C13-Havran2003:EGSR %R 10.2312/EGWR/EGWR03/106-117 %D 2003 %B 14th Eurographics Workshop on Rendering Techniques %Z date of event: 2003-06-25 - 2003-06-27 %C Leuven, Belgium %X Producing high quality animations featuring rich object appearance and <br>compelling lighting effects is very time consuming using traditional <br>frame-by-frame rendering systems. In this paper we present a rendering <br>architecture for computing multiple frames at once by exploiting the coherence <br>between image samples in the temporal domain. For each sample representing a <br>given point in the scene we update its view-dependent components for each frame<br>and add its contribution to pixels identified through the compensation of <br>camera and object motion. This leads naturally to a high quality motion blur <br>and significantly reduces the cost of illumination computations. The required <br>visibility information is provided using a custom ray tracing acceleration data <br>structure for multiple frames simultaneously. We demonstrate that precise<br>and costly global illumination techniques such as bidirectional path tracing <br>become affordable in this rendering architecture. %B Rendering Techniques 2003 %E Christensen, Per; Cohen-Or, Daniel %P 106 - 117, 303 %I The Eurographics Association %@ 1-58113-754-0
Hangelbroek, T., Nürnberger, G., Rössl, C., Seidel, H.-P., and Zeilfelder, F. 2003. The dimension of $C^1$ splines of arbitrary degree on a tetrahedral partition. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We consider the linear space of piecewise polynomials in three variables which are globally smooth, i.e., trivariate $C^1$ splines. The splines are defined on a uniform tetrahedral partition $\Delta$, which is a natural generalization of the four-directional mesh. By using Bernstein-Bézier techniques, we establish formulae for the dimension of the $C^1$ splines of arbitrary degree.
Export
BibTeX
@techreport{HangelbroekNurnbergerRoesslSeidelZeilfelder2003, TITLE = {The dimension of \$C{\textasciicircum}1\$ splines of arbitrary degree on a tetrahedral partition}, AUTHOR = {Hangelbroek, Thomas and N{\"u}rnberger, G{\"u}nther and R{\"o}ssl, Christian and Seidel, Hans-Peter and Zeilfelder, Frank}, LANGUAGE = {eng}, URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2003-4-005}, NUMBER = {MPI-I-2003-4-005}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {We consider the linear space of piecewise polynomials in three variables which are globally smooth, i.e., trivariate $C^1$ splines. The splines are defined on a uniform tetrahedral partition $\Delta$, which is a natural generalization of the four-directional mesh. By using Bernstein-B{\&#180;e}zier techniques, we establish formulae for the dimension of the $C^1$ splines of arbitrary degree.}, TYPE = {Research Report / Max-Planck-Institut f&#252;r Informatik}, }
Endnote
%0 Report %A Hangelbroek, Thomas %A N&#252;rnberger, G&#252;nther %A R&#246;ssl, Christian %A Seidel, Hans-Peter %A Zeilfelder, Frank %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T The dimension of $C^1$ splines of arbitrary degree on a tetrahedral partition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6887-A %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2003-4-005 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2003 %P 39 p. %X We consider the linear space of piecewise polynomials in three variables which are globally smooth, i.e., trivariate $C^1$ splines. The splines are defined on a uniform tetrahedral partition $\Delta$, which is a natural generalization of the four-directional mesh. By using Bernstein-B{\&#180;e}zier techniques, we establish formulae for the dimension of the $C^1$ splines of arbitrary degree. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Granier, X., Goesele, M., Heidrich, W., and Seidel, H.-P. 2003. Interactive Visualization of Complex Real-world Light Sources. Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003), IEEE.
Abstract
Interactive visualization of complex, real-world light sources has so far not been feasible. In this paper, we present an hardware accelerated direct lighting algorithm based on a recent high quality light source acquisition technique. By introducing an approximate reconstruction of the exact model, a multi-pass rendering approach, and a compact data representation, we are able to achieve interactive frame rates. The method is part of the processing pipeline from light source acquisition to high quality lighting of a virtual world.
Export
BibTeX
@inproceedings{Granier-et-al_PG03,
  TITLE     = {Interactive Visualization of Complex Real-world Light Sources},
  AUTHOR    = {Granier, Xavier and Goesele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-2028-6},
  DOI       = {10.1109/PCCGA.2003.1238247},
  LOCALID   = {Local-ID: C125675300671F7B-E31E17CB825AF057C1256D7B00743765-Granier:2003:IVO},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Interactive visualization of complex, real-world light<br>sources has so far not been feasible. In this paper, we<br>present an hardware accelerated direct lighting algorithm<br>based on a recent high quality light source acquisition technique.<br>By introducing an approximate reconstruction of the<br>exact model, a multi-pass rendering approach, and a compact<br>data representation, we are able to achieve interactive<br>frame rates. The method is part of the processing pipeline<br>from light source acquisition to high quality lighting of a<br>virtual world.},
  BOOKTITLE = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications (PG 2003)},
  EDITOR    = {Rokne, Jon and Klein, Reinhard and Wang, Wenping},
  PAGES     = {59--66},
  ADDRESS   = {Canmore, Canada},
}
Endnote
%0 Conference Proceedings %A Granier, Xavier %A Goesele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Visualization of Complex Real-world Light Sources : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D4F-9 %F EDOC: 201979 %F OTHER: Local-ID: C125675300671F7B-E31E17CB825AF057C1256D7B00743765-Granier:2003:IVO %R 10.1109/PCCGA.2003.1238247 %D 2003 %B 11th Pacific Conference on Computer Graphics and Applications %Z date of event: 2003-10-08 - 2003-10-10 %C Canmore, Canada %X Interactive visualization of complex, real-world light<br>sources has so far not been feasible. In this paper, we<br>present an hardware accelerated direct lighting algorithm<br>based on a recent high quality light source acquisition technique.<br>By introducing an approximate reconstruction of the<br>exact model, a multi-pass rendering approach, and a compact<br>data representation, we are able to achieve interactive<br>frame rates. The method is part of the processing pipeline<br>from light source acquisition to high quality lighting of a<br>virtual world. %B Proceedings of the 11th Pacific Conference on Computer Graphics and Applications %E Rokne, Jon; Klein, Reinhard; Wang, Wenping %P 59 - 66 %I IEEE %@ 0-7695-2028-6
Goesele, M., Fuchs, C., and Seidel, H.-P. 2003a. Accuracy of 3D Range Scanners by Measurement of the Slanted Edge Modulation Transfer Function. Proceedings of the 4th International Conference on 3D Digital Imaging and Modeling (3DIM 2003), IEEE.
Abstract
We estimate the accuracy of a 3D~range scanner in terms of its<br>spatial frequency response. We determine a scanner's modulation<br>transfer function (MTF) in order to measure its frequency response.<br>A slanted edge is scanned from which we derive a superresolution<br>edge profile. Its Fourier transform is compared to the Fourier<br>transform of an ideal edge in order to determine the MTF of the<br>device. This allows us to determine how well small details can be<br>acquired by the 3D~scanner. We report the results of several<br>measurements with two scanners under various conditions.
Export
BibTeX
@inproceedings{Goesele-et-al_3DIM03,
  TITLE     = {Accuracy of {3D} Range Scanners by Measurement of the Slanted Edge Modulation Transfer Function},
  AUTHOR    = {Goesele, Michael and Fuchs, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1991-1},
  DOI       = {10.1109/IM.2003.1240230},
  LOCALID   = {Local-ID: C125675300671F7B-1D1EDF899D4E8731C1256D2E0029DF07-Goesele:2003:A3R},
  PUBLISHER = {IEEE},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {We estimate the accuracy of a 3D~range scanner in terms of its<br>spatial frequency response. We determine a scanner's modulation<br>transfer function (MTF) in order to measure its frequency response.<br>A slanted edge is scanned from which we derive a superresolution<br>edge profile. Its Fourier transform is compared to the Fourier<br>transform of an ideal edge in order to determine the MTF of the<br>device. This allows us to determine how well small details can be<br>acquired by the 3D~scanner. We report the results of several<br>measurements with two scanners under various conditions.},
  BOOKTITLE = {Proceedings of the 4th International Conference on 3D Digital Imaging and Modeling (3DIM 2003)},
  EDITOR    = {Rioux, Marc and Godin, Guy and Boulanger, Pierre},
  PAGES     = {37--44},
  ADDRESS   = {Banff, Canada},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Fuchs, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Accuracy of 3D Range Scanners by Measurement of the Slanted Edge Modulation Transfer Function : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BDD-A %F EDOC: 201872 %F OTHER: Local-ID: C125675300671F7B-1D1EDF899D4E8731C1256D2E0029DF07-Goesele:2003:A3R %R 10.1109/IM.2003.1240230 %D 2003 %B 4th International Conference on 3D Digital Imaging and Modeling %Z date of event: 2003-10-06 - 2003-10-10 %C Banff, Canada %X We estimate the accuracy of a 3D~range scanner in terms of its<br>spatial frequency response. We determine a scanner's modulation<br>transfer function (MTF) in order to measure its frequency response.<br>A slanted edge is scanned from which we derive a superresolution<br>edge profile. Its Fourier transform is compared to the Fourier<br>transform of an ideal edge in order to determine the MTF of the<br>device. This allows us to determine how well small details can be<br>acquired by the 3D~scanner. We report the results of several<br>measurements with two scanners under various conditions. %B Proceedings of the 4th International Conference on 3D Digital Imaging and Modeling %E Rioux, Marc; Godin, Guy; Boulanger, Pierre %P 37 - 44 %I IEEE %@ 0-7695-1991-1
Goesele, M., Granier, X., Heidrich, W., and Seidel, H.-P. 2003b. Accurate Light Source Acquisition and Rendering. ACM Transactions on Graphics (Proc. SIGGRAPH 2003), ACM.
Abstract
Realistic image synthesis requires both complex and realistic models<br>of real-world light sources and efficient rendering algorithms to deal<br>with them. In this paper, we describe a processing pipeline for<br>dealing with complex light sources from acquisition to global<br>illumination rendering. We carefully design optical filters to<br>guarantee high precision measurements of real-world light sources. We<br>discuss two practically feasible setups that allow us to measure light<br>sources with different characteristics.<br>Finally, we introduce an efficient importance sampling <br>algorithm for our representation that can be used, for example, in<br>conjunction with Photon Maps.
Export
BibTeX
@inproceedings{Goesele-et-al_SIGGRAPH03,
  TITLE     = {Accurate Light Source Acquisition and Rendering},
  AUTHOR    = {Goesele, Michael and Granier, Xavier and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/882262.882316},
  LOCALID   = {Local-ID: C125675300671F7B-82DCF25CF25AAD0BC1256CF4002FC3C0-Goesele:2003:ALS},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Realistic image synthesis requires both complex and realistic models<br>of real-world light sources and efficient rendering algorithms to deal<br>with them. In this paper, we describe a processing pipeline for<br>dealing with complex light sources from acquisition to global<br>illumination rendering. We carefully design optical filters to<br>guarantee high precision measurements of real-world light sources. We<br>discuss two practically feasible setups that allow us to measure light<br>sources with different characteristics.<br>Finally, we introduce an efficient importance sampling <br>algorithm for our representation that can be used, for example, in<br>conjunction with Photon Maps.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2003},
  EDITOR    = {Hodgins, Jessica K.},
  PAGES     = {621--630},
  JOURNAL   = {ACM Transactions on Graphics (Proc. SIGGRAPH)},
  VOLUME    = {22},
  ISSUE     = {3},
  ADDRESS   = {San Diego, USA},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Granier, Xavier %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Accurate Light Source Acquisition and Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BE1-B %F EDOC: 201975 %F OTHER: Local-ID: C125675300671F7B-82DCF25CF25AAD0BC1256CF4002FC3C0-Goesele:2003:ALS %R 10.1145/882262.882316 %D 2003 %B ACM SIGGRAPH 2003 %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, USA %X Realistic image synthesis requires both complex and realistic models<br>of real-world light sources and efficient rendering algorithms to deal<br>with them. In this paper, we describe a processing pipeline for<br>dealing with complex light sources from acquisition to global<br>illumination rendering. We carefully design optical filters to<br>guarantee high precision measurements of real-world light sources. We<br>discuss two practically feasible setups that allow us to measure light<br>sources with different characteristics.<br>Finally, we introduce an efficient importance sampling <br>algorithm for our representation that can be used, for example, in<br>conjunction with Photon Maps. %B Proceedings of ACM SIGGRAPH 2003 %E Hodgins, Jessica K. %P 621 - 630 %I ACM %J ACM Transactions on Graphics %V 22 %N 3 %I ACM %@ false
Drago, F., Martens, W.L., Myszkowski, K., and Seidel, H.-P. 2003. Perceptual Evaluation of Tone Mapping Operators. Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/DragoMMS03,
  TITLE     = {Perceptual Evaluation of Tone Mapping Operators},
  AUTHOR    = {Drago, Fr{\'e}d{\'e}ric and Martens, William L. and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-7466-8},
  DOI       = {10.1145/965400.965487},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  BOOKTITLE = {Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications},
  EDITOR    = {Rockwood, Alyn P.},
  PAGES     = {1--1},
  ADDRESS   = {San Diego, CA, USA},
}
Endnote
%0 Conference Proceedings %A Drago, Fr&#233;d&#233;ric %A Martens, William L. %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Evaluation of Tone Mapping Operators : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0CB3-A %R 10.1145/965400.965487 %D 2003 %B ACM SIGGRAPH 2003 Conference on Sketches and Applications %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, CA, USA %B Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications %E Rockwood, Alyn P. %P 1 - 1 %I ACM %@ 978-1-4503-7466-8
Daubert, K., Heidrich, W., Kautz, J., Dischler, J.-M., and Seidel, H.-P. 2003. Efficient Light Transport Using Precomputed Visibility. IEEE Computer Graphics and Applications23, 3.
Abstract
Visibility computations are the most time-consuming part of<br>global illumination algorithms. The cost is amplified by the<br>fact that quite often identical or similar information is<br>recomputed multiple times. In particular this is the case when<br>multiple images of the same scene are to be generated under<br>varying lighting conditions and/or viewpoints. But even for a<br>single image with static illumination, the computations could be<br>accelerated by reusing visibility information for many different<br>light paths.<br> <br>In this paper we describe a general method of precomputing,<br>storing, and reusing visibility information for light transport<br>in a number of different types of scenes. In particular, we<br>consider general parametric surfaces, triangle meshes without a<br>global parameterization, and participating media.<br><br>We also reorder the light transport in such a way that the<br>visibility information is accessed in structured memory access<br>patterns. This yields a method that is well suited for SIMD-style<br>parallelization of the light transport, and can efficiently be<br>implemented both in software and using graphics hardware. We<br>finally demonstrate applications of the method to highly<br>efficient precomputation of BRDFs, bidirectional texture<br>functions, light fields, as well as near-interactive volume<br>lighting.
Export
BibTeX
@article{Daubert-et-al_IEEE.CGA.03,
  TITLE     = {Efficient Light Transport Using Precomputed Visibility},
  AUTHOR    = {Daubert, Katja and Heidrich, Wolfgang and Kautz, Jan and Dischler, Jean-Michel and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0272-1716},
  DOI       = {10.1109/MCG.2003.1198260},
  LOCALID   = {Local-ID: C125675300671F7B-6D1DC36F06CF2CE9C1256C3000339FD4-Daubert:2003:ELT},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {Los Alamitos, CA},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {Visibility computations are the most time-consuming part of<br>global illumination algorithms. The cost is amplified by the<br>fact that quite often identical or similar information is<br>recomputed multiple times. In particular this is the case when<br>multiple images of the same scene are to be generated under<br>varying lighting conditions and/or viewpoints. But even for a<br>single image with static illumination, the computations could be<br>accelerated by reusing visibility information for many different<br>light paths.<br> <br>In this paper we describe a general method of precomputing,<br>storing, and reusing visibility information for light transport<br>in a number of different types of scenes. In particular, we<br>consider general parametric surfaces, triangle meshes without a<br>global parameterization, and participating media.<br><br>We also reorder the light transport in such a way that the<br>visibility information is accessed in structured memory access<br>patterns. This yields a method that is well suited for SIMD-style<br>parallelization of the light transport, and can efficiently be<br>implemented both in software and using graphics hardware. We<br>finally demonstrate applications of the method to highly<br>efficient precomputation of BRDFs, bidirectional texture<br>functions, light fields, as well as near-interactive volume<br>lighting.},
  JOURNAL   = {IEEE Computer Graphics and Applications},
  VOLUME    = {23},
  NUMBER    = {3},
  PAGES     = {28--37},
}
Endnote
%0 Journal Article %A Daubert, Katja %A Heidrich, Wolfgang %A Kautz, Jan %A Dischler, Jean-Michel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Light Transport Using Precomputed Visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CD6-E %F EDOC: 201800 %F OTHER: Local-ID: C125675300671F7B-6D1DC36F06CF2CE9C1256C3000339FD4-Daubert:2003:ELT %R 10.1109/MCG.2003.1198260 %D 2003 %* Review method: peer-reviewed %X Visibility computations are the most time-consuming part of<br>global illumination algorithms. The cost is amplified by the<br>fact that quite often identical or similar information is<br>recomputed multiple times. In particular this is the case when<br>multiple images of the same scene are to be generated under<br>varying lighting conditions and/or viewpoints. But even for a<br>single image with static illumination, the computations could be<br>accelerated by reusing visibility information for many different<br>light paths.<br> <br>In this paper we describe a general method of precomputing,<br>storing, and reusing visibility information for light transport<br>in a number of different types of scenes. In particular, we<br>consider general parametric surfaces, triangle meshes without a<br>global parameterization, and participating media.<br><br>We also reorder the light transport in such a way that the<br>visibility information is accessed in structured memory access<br>patterns. This yields a method that is well suited for SIMD-style<br>parallelization of the light transport, and can efficiently be<br>implemented both in software and using graphics hardware. 
We<br>finally demonstrate applications of the method to highly<br>efficient precomputation of BRDFs, bidirectional texture<br>functions, light fields, as well as near-interactive volume<br>lighting. %J IEEE Computer Graphics and Applications %V 23 %N 3 %& 28 %P 28 - 37 %I IEEE Computer Society : %C Los Alamitos, CA %@ false
Carranza, J., Theobalt, C., Magnor, M., and Seidel, H.-P. 2003. Free-viewpoint Video of Human Actors. ACM Transactions on Graphics, ACM.
Abstract
In free-viewpoint video, the viewer can interactively choose his<br> viewpoint in \mbox{3-D} space to observe the action of a dynamic<br> real-world scene from arbitrary perspectives.<br><br> The human body and its motion plays a central role in most visual media<br> and its structure can be exploited for robust motion estimation and efficient <br> visualization. This paper describes a system that uses multi-view<br> synchronized video footage of an actor's performance to<br> estimate motion parameters and to interactively re-render the<br> actor's appearance from any viewpoint.<br><br> <br> The actor's silhouettes are extracted from<br> synchronized video frames via background<br> segmentation and then used to determine a sequence of poses<br> for a \mbox{3D} human body model.<br> By employing multi-view texturing during rendering, time-dependent changes <br> in the body surface are reproduced in high detail.<br> The motion capture subsystem runs offline, is non-intrusive, yields robust <br> motion parameter estimates, and can cope with a<br> broad range of motion.<br> The rendering subsystem runs at real-time frame rates using ubiquous<br> graphics hardware, yielding a highly naturalistic impression of the<br> actor.<br> The actor can be placed in virtual environments to create composite dynamic <br> scenes. Free-viewpoint video allows the creation of camera fly-throughs or <br> viewing the action interactively from arbitrary perspectives.
Export
BibTeX
@inproceedings{Carranza-et-al_SIGGRAPH03,
  TITLE     = {Free-viewpoint Video of Human Actors},
  AUTHOR    = {Carranza, Joel and Theobalt, Christian and Magnor, Marcus and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/882262.882309},
  LOCALID   = {Local-ID: C125675300671F7B-A3BEEB6B489CB9B8C1256CFB00455F56-CarrTheoSig2003},
  PUBLISHER = {ACM},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {In free-viewpoint video, the viewer can interactively choose his<br> viewpoint in \mbox{3-D} space to observe the action of a dynamic<br> real-world scene from arbitrary perspectives.<br><br> The human body and its motion plays a central role in most visual media<br> and its structure can be exploited for robust motion estimation and efficient <br> visualization. This paper describes a system that uses multi-view<br> synchronized video footage of an actor's performance to<br> estimate motion parameters and to interactively re-render the<br> actor's appearance from any viewpoint.<br><br> <br> The actor's silhouettes are extracted from<br> synchronized video frames via background<br> segmentation and then used to determine a sequence of poses<br> for a \mbox{3D} human body model.<br> By employing multi-view texturing during rendering, time-dependent changes <br> in the body surface are reproduced in high detail.<br> The motion capture subsystem runs offline, is non-intrusive, yields robust <br> motion parameter estimates, and can cope with a<br> broad range of motion.<br> The rendering subsystem runs at real-time frame rates using ubiquous<br> graphics hardware, yielding a highly naturalistic impression of the<br> actor.<br> The actor can be placed in virtual environments to create composite dynamic <br> scenes. Free-viewpoint video allows the creation of camera fly-throughs or <br> viewing the action interactively from arbitrary perspectives.},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2003 (SIGGRAPH-03)},
  EDITOR    = {Hodgins, Jessica K.},
  PAGES     = {569--577},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {22},
  ISSUE     = {3},
  ADDRESS   = {San Diego, USA},
}
Endnote
%0 Conference Proceedings %A Carranza, Joel %A Theobalt, Christian %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Free-viewpoint Video of Human Actors : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2D0D-C %F EDOC: 201929 %F OTHER: Local-ID: C125675300671F7B-A3BEEB6B489CB9B8C1256CFB00455F56-CarrTheoSig2003 %R 10.1145/882262.882309 %D 2003 %B ACM SIGGRAPH 2003 %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, USA %X In free-viewpoint video, the viewer can interactively choose his<br> viewpoint in \mbox{3-D} space to observe the action of a dynamic<br> real-world scene from arbitrary perspectives.<br><br> The human body and its motion plays a central role in most visual media<br> and its structure can be exploited for robust motion estimation and efficient <br> visualization. 
This paper describes a system that uses multi-view<br> synchronized video footage of an actor's performance to<br> estimate motion parameters and to interactively re-render the<br> actor's appearance from any viewpoint.<br><br> <br> The actor's silhouettes are extracted from<br> synchronized video frames via background<br> segmentation and then used to determine a sequence of poses<br> for a \mbox{3D} human body model.<br> By employing multi-view texturing during rendering, time-dependent changes <br> in the body surface are reproduced in high detail.<br> The motion capture subsystem runs offline, is non-intrusive, yields robust <br> motion parameter estimates, and can cope with a<br> broad range of motion.<br> The rendering subsystem runs at real-time frame rates using ubiquous<br> graphics hardware, yielding a highly naturalistic impression of the<br> actor.<br> The actor can be placed in virtual environments to create composite dynamic <br> scenes. Free-viewpoint video allows the creation of camera fly-throughs or <br> viewing the action interactively from arbitrary perspectives. %B Proceedings of ACM SIGGRAPH 2003 (SIGGRAPH-03) %E Hodgins, Jessica K. %P 569 - 577 %I ACM %J ACM Transactions on Graphics %V 22 %N 3 %I ACM %@ false
Brabec, S. and Seidel, H.-P. 2003. Shadow Volumes on Programmable Graphics Hardware. Computer Graphics Forum22, 3.
Abstract
One of the best choices for fast, high quality shadows is the shadow<br>volume algorithm. However, for real time applications the extraction<br>of silhouette edges can significantly burden the CPU, especially<br>with highly tessellated input geometry or when complex geometry<br>shaders are applied.<br><br>In this paper we show how this last, expensive part of the shadow<br>volume method can be implemented on programmable graphics hardware.<br>This way, the originally hybrid shadow volumes algorithm can now be<br>reformulated as a purely hardware-accelerated approach.<br><br>The benefits of this implementation is not only the increase in speed.<br>Firstly, all computations now run on the same hardware <br>resulting in consistent precision within all steps of the<br>algorithm. Secondly, programmable vertex transformations<br>are no longer problematic when applied to shadow casting objects.
Export
BibTeX
@article{Brabec-Seidel_EUROGRAPHICS03,
  TITLE     = {Shadow Volumes on Programmable Graphics Hardware},
  AUTHOR    = {Brabec, Stefan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/1467-8659.00691},
  LOCALID   = {Local-ID: C125675300671F7B-AFF5555AF0AF457CC1256D250049229E-Brabec2003:SPG},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS   = {Oxford},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {One of the best choices for fast, high quality shadows is the shadow<br>volume algorithm. However, for real time applications the extraction<br>of silhouette edges can significantly burden the CPU, especially<br>with highly tessellated input geometry or when complex geometry<br>shaders are applied.<br><br>In this paper we show how this last, expensive part of the shadow<br>volume method can be implemented on programmable graphics hardware.<br>This way, the originally hybrid shadow volumes algorithm can now be<br>reformulated as a purely hardware-accelerated approach.<br><br>The benefits of this implementation is not only the increase in speed.<br>Firstly, all computations now run on the same hardware <br>resulting in consistent precision within all steps of the<br>algorithm. Secondly, programmable vertex transformations<br>are no longer problematic when applied to shadow casting objects.},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {22},
  NUMBER    = {3},
  PAGES     = {433--440},
  BOOKTITLE = {EUROGRAPHICS 2003},
  EDITOR    = {Brunet, Pere and Fellner, Dieter W.},
}
Endnote
%0 Journal Article %A Brabec, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shadow Volumes on Programmable Graphics Hardware : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E0E-4 %F EDOC: 201837 %F OTHER: Local-ID: C125675300671F7B-AFF5555AF0AF457CC1256D250049229E-Brabec2003:SPG %R 10.1111/1467-8659.00691 %D 2003 %X One of the best choices for fast, high quality shadows is the shadow<br>volume algorithm. However, for real time applications the extraction<br>of silhouette edges can significantly burden the CPU, especially<br>with highly tessellated input geometry or when complex geometry<br>shaders are applied.<br><br>In this paper we show how this last, expensive part of the shadow<br>volume method can be implemented on programmable graphics hardware.<br>This way, the originally hybrid shadow volumes algorithm can now be<br>reformulated as a purely hardware-accelerated approach.<br><br>The benefits of this implementation is not only the increase in speed.<br>Firstly, all computations now run on the same hardware <br>resulting in consistent precision within all steps of the<br>algorithm. Secondly, programmable vertex transformations<br>are no longer problematic when applied to shadow casting objects. %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 22 %N 3 %& 433 %P 433 - 440 %I Blackwell-Wiley %C Oxford %@ false %B EUROGRAPHICS 2003 %I Blackwell %C Oxford, UK
Bekaert, P., Slusallek, P., Cools, R., Havran, V., and Seidel, H.-P. 2003. A custom designed density estimation method for light transport. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present a new Monte Carlo method for solving the global illumination problem in environments with general geometry descriptions and light emission and scattering properties. Current Monte Carlo global illumination algorithms are based on generic density estimation techniques that do not take into account any knowledge about the nature of the data points --- light and potential particle hit points --- from which a global illumination solution is to be reconstructed. We propose a novel estimator, especially designed for solving linear integral equations such as the rendering equation. The resulting single-pass global illumination algorithm promises to combine the flexibility and robustness of bi-directional path tracing with the efficiency of algorithms such as photon mapping.
Export
BibTeX
@techreport{BekaertSlusallekCoolsHavranSeidel,
  TITLE       = {A custom designed density estimation method for light transport},
  AUTHOR      = {Bekaert, Philippe and Slusallek, Philipp and Cools, Ronald and Havran, Vlastimil and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2003-4-004},
  NUMBER      = {MPI-I-2003-4-004},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2003},
  DATE        = {2003},
  ABSTRACT    = {We present a new Monte Carlo method for solving the global illumination problem in environments with general geometry descriptions and light emission and scattering properties. Current Monte Carlo global illumination algorithms are based on generic density estimation techniques that do not take into account any knowledge about the nature of the data points --- light and potential particle hit points --- from which a global illumination solution is to be reconstructed. We propose a novel estimator, especially designed for solving linear integral equations such as the rendering equation. The resulting single-pass global illumination algorithm promises to combine the flexibility and robustness of bi-directional path tracing with the efficiency of algorithms such as photon mapping.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Bekaert, Philippe %A Slusallek, Philipp %A Cools, Ronald %A Havran, Vlastimil %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Cluster of Excellence Multimodal Computing and Interaction External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A custom designed density estimation method for light transport : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6922-2 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2003-4-004 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2003 %P 28 p. %X We present a new Monte Carlo method for solving the global illumination problem in environments with general geometry descriptions and light emission and scattering properties. Current Monte Carlo global illumination algorithms are based on generic density estimation techniques that do not take into account any knowledge about the nature of the data points --- light and potential particle hit points --- from which a global illumination solution is to be reconstructed. We propose a novel estimator, especially designed for solving linear integral equations such as the rendering equation. The resulting single-pass global illumination algorithm promises to combine the flexibility and robustness of bi-directional path tracing with the efficiency of algorithms such as photon mapping. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Albrecht, I., Haber, J., and Seidel, H.-P. 2003. Construction and Animation of Anatomically Based Human Hand Models. ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SIGGRAPH-SCA 2003), The Eurographics Association.
Abstract
The human hand is a masterpiece of mechanical complexity, able to perform<br>fine motor manipulations and powerful work alike. Designing an animatable<br>human hand model that features the abilities of the archetype created by<br>Nature requires a great deal of anatomical detail to be modeled. In this<br>paper, we present a human hand model with underlying anatomical<br>structure. Animation of the hand model is controlled by muscle contraction<br>values. We employ a physically based hybrid muscle model to convert these<br>contraction values into movement of skin and bones. Pseudo muscles directly<br>control the rotation of bones based on anatomical data and mechanical laws,<br>while geometric muscles deform the skin tissue using a mass-spring<br>system. Thus, resulting animations automatically exhibit anatomically and<br>physically correct finger movements and skin deformations. In addition, we<br>present a deformation technique to create individual hand models from<br>photographs. A radial basis warping function is set up from the<br>correspondence of feature points and applied to the complete structure of<br>the reference hand model, making the deformed hand model instantly<br>animatable.
Export
BibTeX
@inproceedings{Albrecht-et-al_SIGGRAPH-SCA03_,
  TITLE     = {Construction and Animation of Anatomically Based Human Hand Models},
  AUTHOR    = {Albrecht, Irene and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-58113-659-5},
  DOI       = {10.2312/SCA03/098-109},
  LOCALID   = {Local-ID: C125675300671F7B-7BB1D8BD557CD39CC1256D2D0033E300-Albrecht:CAABHHM},
  PUBLISHER = {The Eurographics Association},
  YEAR      = {2003},
  DATE      = {2003},
  ABSTRACT  = {The human hand is a masterpiece of mechanical complexity, able to perform<br>fine motor manipulations and powerful work alike. Designing an animatable<br>human hand model that features the abilities of the archetype created by<br>Nature requires a great deal of anatomical detail to be modeled. In this<br>paper, we present a human hand model with underlying anatomical<br>structure. Animation of the hand model is controlled by muscle contraction<br>values. We employ a physically based hybrid muscle model to convert these<br>contraction values into movement of skin and bones. Pseudo muscles directly<br>control the rotation of bones based on anatomical data and mechanical laws,<br>while geometric muscles deform the skin tissue using a mass-spring<br>system. Thus, resulting animations automatically exhibit anatomically and<br>physically correct finger movements and skin deformations. In addition, we<br>present a deformation technique to create individual hand models from<br>photographs. A radial basis warping function is set up from the<br>correspondence of feature points and applied to the complete structure of<br>the reference hand model, making the deformed hand model instantly<br>animatable.},
  BOOKTITLE = {ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SIGGRAPH-SCA 2003)},
  EDITOR    = {Breen, Dave and Lin, Ming},
  PAGES     = {98--109, 368},
  ADDRESS   = {San Diego, USA},
}
Endnote
%0 Conference Proceedings %A Albrecht, Irene %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Construction and Animation of Anatomically Based Human Hand Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C9D-1 %F EDOC: 201820 %F OTHER: Local-ID: C125675300671F7B-7BB1D8BD557CD39CC1256D2D0033E300-Albrecht:CAABHHM %R 10.2312/SCA03/098-109 %D 2003 %B ACM SIGGRAPH/Eurographics Symposium on Computer Animation %Z date of event: 2003-07-26 - 2003-07-27 %C San Diego, USA %X The human hand is a masterpiece of mechanical complexity, able to perform<br>fine motor manipulations and powerful work alike. Designing an animatable<br>human hand model that features the abilities of the archetype created by<br>Nature requires a great deal of anatomical detail to be modeled. In this<br>paper, we present a human hand model with underlying anatomical<br>structure. Animation of the hand model is controlled by muscle contraction<br>values. We employ a physically based hybrid muscle model to convert these<br>contraction values into movement of skin and bones. Pseudo muscles directly<br>control the rotation of bones based on anatomical data and mechanical laws,<br>while geometric muscles deform the skin tissue using a mass-spring<br>system. Thus, resulting animations automatically exhibit anatomically and<br>physically correct finger movements and skin deformations. In addition, we<br>present a deformation technique to create individual hand models from<br>photographs. A radial basis warping function is set up from the<br>correspondence of feature points and applied to the complete structure of<br>the reference hand model, making the deformed hand model instantly<br>animatable. 
%B ACM SIGGRAPH/Eurographics Symposium on Computer Animation %E Breen, Dave; Lin, Ming %P 98 - 109, 368 %I The Eurographics Association %@ 1-58113-659-5
2002
Zobel, M., Fritz, M., and Scholz, I. 2002. Object Tracking and Pose Estimation Using Light‐Field Object Models. Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{zobel02,
  TITLE     = {Object Tracking and Pose Estimation Using Light-Field Object Models},
  AUTHOR    = {Zobel, Matthias and Fritz, Mario and Scholz, Ingo},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-034-3},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2002},
  DATE      = {2002},
  BOOKTITLE = {Vision, Modeling, and Visualization 2002 (VMV 2002)},
  EDITOR    = {Greiner, G{\"u}nther and Niemann, Heinrich and Ertl, Thomas and Girod, Bernd and Seidel, Hans-Peter},
  PAGES     = {371--378},
  ADDRESS   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Zobel, Matthias %A Fritz, Mario %A Scholz, Ingo %+ External Organizations External Organizations External Organizations %T Object Tracking and Pose Estimation Using Light&#8208;feld Object Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0026-B6B5-3 %D 2002 %B 7th International Fall Workshop Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %B Vision, Modeling, and Visualization 2002 %E Greiner, G&#252;nther; Niemann, Heinrich; Ertl, Thomas; Girod, Bernd; Seidel, Hans-Peter %P 371 - 378 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-034-3
Zeilfelder, F. and Seidel, H.-P. 2002. Splines over Triangulations. In: The Handbook of Computer Aided Geometric Design. Elsevier, Amsterdam, the Netherlands.
Abstract
The aim of this survey is to give an overview of the field <br>of splines over triangulations. We summarize results on <br>Bernstein-B\'ezier techniques, the dimension of bivariate<br>splines, interpolation by bivariate splines, and we describe <br>the simplex spline approach.
Export
BibTeX
@incollection{ZeilfelderSeidel2002,
  TITLE     = {Splines over Triangulations},
  AUTHOR    = {Zeilfelder, Frank and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-0-444-51104-1},
  DOI       = {10.1016/B978-044451104-1/50029-0},
  LOCALID   = {Local-ID: C125675300671F7B-19B3DF8B8053D55BC1256AB9002C2BD8-ZeilfelderSeidel2002},
  PUBLISHER = {Elsevier},
  ADDRESS   = {Amsterdam, the Netherlands},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {The aim of this survey is to give an overview of the field <br>of splines over triangulations. We summarize results on <br>Bernstein-B\'ezier techniques, the dimension of bivariate<br>splines, interpolation by bivariate splines, and we describe <br>the simplex spline approach.},
  BOOKTITLE = {The Handbook of Computer Aided Geometric Design},
  EDITOR    = {Farin, Gerald and Hoschek, Josef and Kim, Myung-Soo and Abma, Doutzen},
  PAGES     = {701--722},
}
Endnote
%0 Book Section %A Zeilfelder, Frank %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Splines over Triangulations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-308B-D %F EDOC: 202231 %F OTHER: Local-ID: C125675300671F7B-19B3DF8B8053D55BC1256AB9002C2BD8-ZeilfelderSeidel2002 %R 10.1016/B978-044451104-1/50029-0 %D 2002 %X The aim of this survey is to give an overview of the field <br>of splines over triangulations. We summarize results on <br>Bernstein-B\'ezier techniques, the dimension of bivariate<br>splines, interpolation by bivariate splines, and we describe <br>the simplex spline approach. %B The Handbook of Computer Aided Geometric Design %E Farin, Gerald; Hoschek, Josef; Kim, Myung-Soo; Abma, Doutzen %P 701 - 722 %I Elsevier %C Amsterdam, the Netherlands %@ 978-0-444-51104-1
Yoshizawa, S., Belyaev, A., and Seidel, H.-P. 2002. A Simple Approach to Interactive Free-Form Shape Deformations. Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002), IEEE.
Abstract
In this paper, we propose a set of free-form shape deformation<br>techniques. The basic technique can be described as<br>follows. Given a surface represented by a mesh and a control<br>point, for every mesh vertex let us consider the difference<br>between the control point and the vertex. The vertex is shifted by<br>a displacement equal to the difference times a scale factor where<br>the scale factor is given by a function depending nonlinearly on<br>the difference. The function is bump-shaped and depends on a<br>number of parameters. Varying the parameters leads to a rich<br>palette of shape deformations. The proposed techniques include <br>also shape deformations with multiple (real, auxiliary, and virtual)<br>control points and constrained, directional, and anisotropic deformations.<br>We demonstrate that the proposed set of techniques <br>allows a user to edit a given shape interactively and intuitively. <br>The techniques use no mesh connectivity information and, therefore, <br>can be applied directly to a shape given as a cloud of points.
Export
BibTeX
@inproceedings{Yoshizawa-et-al_PG02,
  TITLE     = {A Simple Approach to Interactive Free-Form Shape Deformations},
  AUTHOR    = {Yoshizawa, Shin and Belyaev, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1784-6},
  DOI       = {10.1109/PCCGA.2002.1167905},
  LOCALID   = {Local-ID: C125675300671F7B-09C1794DE8E97FD9C1256CAF00716A35-pg02ybs},
  PUBLISHER = {IEEE},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {In this paper, we propose a set of free-form shape deformation<br>techniques. The basic technique can be described as<br>follows. Given a surface represented by a mesh and a control<br>point, for every mesh vertex let us consider the difference<br>between the control point and the vertex. The vertex is shifted by<br>a displacement equal to the difference times a scale factor where<br>the scale factor is given by a function depending nonlinearly on<br>the difference. The function is bump-shaped and depends on a<br>number of parameters. Varying the parameters leads to a rich<br>palette of shape deformations. The proposed techniques include <br>also shape deformations with multiple (real, auxiliary, and virtual)<br>control points and constrained, directional, and anisotropic deformations.<br>We demonstrate that the proposed set of techniques <br>allows a user to edit a given shape interactively and intuitively. <br>The techniques use no mesh connectivity information and, therefore, <br>can be applied directly to a shape given as a cloud of points.},
  BOOKTITLE = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002)},
  EDITOR    = {Coquillart, Sabine and Shum, Heung-Yeung and Hu, Shi-Min},
  PAGES     = {471--474},
  ADDRESS   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Yoshizawa, Shin %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Simple Approach to Interactive Free-Form Shape Deformations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F0B-2 %F EDOC: 202181 %F OTHER: Local-ID: C125675300671F7B-09C1794DE8E97FD9C1256CAF00716A35-pg02ybs %R 10.1109/PCCGA.2002.1167905 %D 2002 %B 10th Pacific Conference on Computer Graphics and Applications %Z date of event: 2002-10-09 - 2002-10-11 %C Beijing, China %X In this paper, we propose a set of free-form shape deformation<br>techniques. The basic technique can be described as<br>follows. Given a surface represented by a mesh and a control<br>point, for every mesh vertex let us consider the difference<br>between the control point and the vertex. The vertex is shifted by<br>a displacement equal to the difference times a scale factor where<br>the scale factor is given by a function depending nonlinearly on<br>the difference. The function is bump-shaped and depends on a<br>number of parameters. Varying the parameters leads to a rich<br>palette of shape deformations. The proposed techniques include <br>also shape deformations with multiple (real, auxiliary, and virtual)<br>control points and constrained, directional, and anisotropic deformations.<br>We demonstrate how that the proposed set of techniques <br>allows a user to edit a given shape interactively and intuitively. <br>The techniques use no mesh connectivity information and, therefore, <br>can be applied directly to a shape given as a cloud of points. %B Proceedings of the 10th Pacific Conference on Computer Graphics and Applications %E Coquillart, Sabine; Shum, Heung-Yeung; Hu, Shi-Min %P 471 - 474 %I IEEE %@ 0-7695-1784-6
Theobalt, C., Magnor, M., Schüler, P., and Seidel, H.-P. 2002a. Multi-Layer Skeleton Fitting for Online Human Motion Capture. Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka GmbH.
Export
BibTeX
@inproceedings{Theobalt-et-al_VMV02,
  TITLE     = {Multi-Layer Skeleton Fitting for Online Human Motion Capture},
  AUTHOR    = {Theobalt, Christian and Magnor, Marcus and Sch{\"u}ler, Pascal and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-034-3},
  LOCALID   = {Local-ID: C125675300671F7B-CB29D84DAAB9313AC1256CA20064EB82-TheobaltVMV2002},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka GmbH},
  YEAR      = {2002},
  DATE      = {2002},
  BOOKTITLE = {Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002)},
  EDITOR    = {Greiner, G{\"u}nther and Niemann, Heinrich and Ertl, Thomas and Girod, Bernd and Seidel, Hans-Peter},
  PAGES     = {471--478},
  ADDRESS   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Magnor, Marcus %A Sch&#252;ler, Pascal %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-Layer Skeleton Fitting for Online Human Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FFA-8 %F EDOC: 202201 %F OTHER: Local-ID: C125675300671F7B-CB29D84DAAB9313AC1256CA20064EB82-TheobaltVMV2002 %D 2002 %B 7th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %B Proceedings of Vision, Modeling, and Visualization 2002 %P 471 - 478 %I Akademische Verlagsgesellschaft Aka GmbH %@ 3-89838-034-3
Theobalt, C., Magnor, M., Schüler, P., and Seidel, H.-P. 2002b. Combining 2D Feature Tracking and Volume Reconstruction for Online Video-Based Human Motion Capture. Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002), IEEE.
Export
BibTeX
@inproceedings{Theobalt-et-al_PG02,
  TITLE     = {Combining {2D} Feature Tracking and Volume Reconstruction for Online Video-Based Human Motion Capture},
  AUTHOR    = {Theobalt, Christian and Magnor, Marcus and Sch{\"u}ler, Pascal and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1784-6},
  DOI       = {10.1109/PCCGA.2002.1167843},
  LOCALID   = {Local-ID: C125675300671F7B-42A8AB2990056AB1C1256CA200639B58-TheobaltPG2002},
  PUBLISHER = {IEEE},
  YEAR      = {2002},
  DATE      = {2002},
  BOOKTITLE = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002)},
  EDITOR    = {Coquillart, Sabine and Shum, Heung-Yeung and Hu, Shi-Min},
  PAGES     = {96--103},
  ADDRESS   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Theobalt, Christian %A Magnor, Marcus %A Sch&#252;ler, Pascal %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Programming Logics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combining 2D Feature Tracking and Volume Reconstruction for Online Video-Based Human Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F39-9 %F EDOC: 202194 %F OTHER: Local-ID: C125675300671F7B-42A8AB2990056AB1C1256CA200639B58-TheobaltPG2002 %R 10.1109/PCCGA.2002.1167843 %D 2002 %B 10th Pacific Conference on Computer Graphics and Applications %Z date of event: 2002-10-09 - 2002-10-11 %C Beijing, China %B Proceedings of the 10th Pacific Conference on Computer Graphics and Applications %E Coquillart, Sabine; Shum, Heung-Yeung; Hu, Shi-Min %P 96 - 103 %I IEEE %@ 0-7695-1784-6
Theisel, H. 2002. Designing 2D Vector Fields of Arbitrary Topology. EUROGRAPHICS 2002, Blackwell.
Abstract
We introduce a scheme of control polygons to design topological skeletons for vector fields of arbitrary topology. Based on this we construct piecewise linear vector fields of exactly the topology specified by the control polygons. This way a controlled construction of vector fields of any topology is possible. Finally we apply this method for topology-preserving compression of vector fields consisting of a simple topology.
Export
BibTeX
@inproceedings{Theisel2002a,
  TITLE     = {Designing {2D} Vector Fields of Arbitrary Topology},
  AUTHOR    = {Theisel, Holger},
  EDITOR    = {Drettakis, George and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  LOCALID   = {Local-ID: C125675300671F7B-DD202E51D401DD21C1256C320040B5D7-Theisel2002a},
  PUBLISHER = {Blackwell},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {We introduce a scheme of control polygons to design topological skeletons for vector fields of arbitrary topology. Based on this we construct piecewise linear vector fields of exactly the topology specified by the control polygons. This way a controlled construction of vector fields of any topology is possible. Finally we apply this method for topology-preserving compression of vector fields consisting of a simple topology.},
  BOOKTITLE = {EUROGRAPHICS 2002},
  PAGES     = {595--604},
  SERIES    = {Computer Graphics Forum},
  ADDRESS   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Theisel, Holger %E Drettakis, George %E Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Designing 2D Vector Fields of Arbitrary Topology : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F54-B %F EDOC: 202196 %F OTHER: Local-ID: C125675300671F7B-DD202E51D401DD21C1256C320040B5D7-Theisel2002a %D 2002 %B EUROGRAPHICS 2002 %Z date of event: 2002-09-02 - 2002-09-06 %C Saarbr&#252;cken, Germany %X We introduce a scheme of control polygons to design topological skeletons for vector fields of arbitrary topology. Based on this we construct piecewise linear vector fields of exactly the topology specified by the control polygons. This way a controlled construction of vector fields of any topology is possible. Finally we apply this method for topology-preserving compression of vector fields consisting of a simple topology. %B EUROGRAPHICS 2002 %P 595 - 604 %I Blackwell %B Computer Graphics Forum %@ false
Tawara, T., Myszkowski, K., and Seidel, H.-P. 2002. Localizing the Final Gathering for Dynamic Scenes using the Photon Map. Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka GmbH.
Abstract
Rendering of high quality animations with global <br>illumination effects is very costly using traditional <br>techniques designed for static scenes.<br>In this paper we present an extension of<br>the photon mapping algorithm <br>to handle dynamic environments. First, for each animation segment<br>the static irradiance cache is computed only once for the scene with<br>all dynamic objects removed. Then, for each frame, the<br>dynamic objects are inserted and the irradiance cache<br>is updated locally in the scene regions whose lighting<br>is strongly affected by the objects. In the remaining<br>scene regions the photon map is used to<br>correct the irradiance values in the static cache.<br>As a result the overall animation rendering efficiency<br>is significantly improved and the temporal aliasing is<br>reduced.
Export
BibTeX
@inproceedings{Tawara-et-al_VMV02,
  TITLE     = {Localizing the Final Gathering for Dynamic Scenes using the Photon Map},
  AUTHOR    = {Tawara, Takehiro and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-034-3},
  LOCALID   = {Local-ID: C125675300671F7B-26CF9AFACE9BDEF4C1256C80005DCB2A-Tawara2002},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka GmbH},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {Rendering of high quality animations with global <br>illumination effects is very costly using traditional <br>techniques designed for static scenes.<br>In this paper we present an extension of<br>the photon mapping algorithm <br>to handle dynamic environments. First, for each animation segment<br>the static irradiance cache is computed only once for the scene with<br>all dynamic objects removed. Then, for each frame, the<br>dynamic objects are inserted and the irradiance cache<br>is updated locally in the scene regions whose lighting<br>is strongly affected by the objects. In the remaining<br>scene regions the photon map is used to<br>correct the irradiance values in the static cache.<br>As a result the overall animation rendering efficiency<br>is significantly improved and the temporal aliasing is<br>reduced.},
  BOOKTITLE = {Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002)},
  PAGES     = {69--76},
  ADDRESS   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Localizing the Final Gathering for Dynamic Scenes using the Photon Map : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FD2-D %F EDOC: 202197 %F OTHER: Local-ID: C125675300671F7B-26CF9AFACE9BDEF4C1256C80005DCB2A-Tawara2002 %D 2002 %B 7th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %X Rendering of high quality animations with global <br>illumination effects is very costly using traditional <br>techniques designed for static scenes.<br>In this paper we present an extension of<br>the photon mapping algorithm <br>to handle dynamic environments. First, for each animation segment<br>the static irradiance cache is computed only once for the scene with<br>all dynamic objects removed. Then, for each frame, the<br>dynamic objects are inserted and the irradiance cache<br>is updated locally in the scene regions whose lighting<br>is strongly affected by the objects. In the remaining<br>scene regions the photon map is used to<br>correct the irradiance values in the static cache.<br>As a result the overall animation rendering efficiency<br>is significantly improved and the temporal aliasing is<br>reduced. %B Proceedings of Vision, Modeling, and Visualization 2002 %P 69 - 76 %I Akademische Verlagsgesellschaft Aka GmbH %@ 1-58603-302-6
Tarini, M., Lensch, H.P.A., Goesele, M., and Seidel, H.-P. 2002a. Shape from Distortion: 3D Range Scanning of Mirroring Objects. ACM SIGGRAPH 2002 conference abstracts and applications, ACM.
Export
BibTeX
@inproceedings{Tarini-et-al_SIGGRAPH02,
  TITLE     = {Shape from Distortion: {3D} Range Scanning of Mirroring Objects},
  AUTHOR    = {Tarini, Marco and Lensch, Hendrik P. A. and Goesele, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-525-1},
  DOI       = {10.1145/1242073.1242261},
  LOCALID   = {Local-ID: C125675300671F7B-9292873746EDB05DC1256D0200336D28-Tarini:2002:SFD},
  PUBLISHER = {ACM},
  YEAR      = {2002},
  DATE      = {2002},
  BOOKTITLE = {ACM SIGGRAPH 2002 conference abstracts and applications},
  PAGES     = {248--248},
  ADDRESS   = {San Antonio, TX, USA},
}
Endnote
%0 Conference Proceedings %A Tarini, Marco %A Lensch, Hendrik P. A. %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shape from Distortion: 3D Range Scanning of Mirroring Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3077-A %F EDOC: 202228 %F OTHER: Local-ID: C125675300671F7B-9292873746EDB05DC1256D0200336D28-Tarini:2002:SFD %R 10.1145/1242073.1242261 %D 2002 %B SIGGRAPH 2002 %Z date of event: 2002-07-21 - 2002-07-26 %C San Antonio, TX, USA %B ACM SIGGRAPH 2002 conference abstracts and applications %P 248 - 248 %I ACM %@ 978-1-58113-525-1
Tarini, M., Yamauchi, H., Haber, J., and Seidel, H.-P. 2002b. Texturing Faces. Proceedings of Graphics Interface 2002 (GI 2002), A K Peters.
Abstract
We present a number of techniques to facilitate the generation of textures<br>for facial modeling. In particular, we address the generation of facial skin<br>textures from uncalibrated input photographs as well as the creation of<br>individual textures for facial components such as eyes or teeth. Apart from<br>an initial feature point selection for the skin texturing, all our methods<br>work fully automatically without any user interaction. The resulting<br>textures show a high quality and are suitable for both photo-realistic and<br>real-time facial animation.
Export
BibTeX
@inproceedings{Tarini-et-al_GI02,
  TITLE     = {Texturing Faces},
  AUTHOR    = {Tarini, Marco and Yamauchi, Hitoshi and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-56881-183-7},
  DOI       = {10.20380/GI2002.11},
  LOCALID   = {Local-ID: C125675300671F7B-EFCB1D85006F5253C1256B6E00383C7B-Tarini:TF},
  PUBLISHER = {A K Peters},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {We present a number of techniques to facilitate the generation of textures<br>for facial modeling. In particular, we address the generation of facial skin<br>textures from uncalibrated input photographs as well as the creation of<br>individual textures for facial components such as eyes or teeth. Apart from<br>an initial feature point selection for the skin texturing, all our methods<br>work fully automatically without any user interaction. The resulting<br>textures show a high quality and are suitable for both photo-realistic and<br>real-time facial animation.},
  BOOKTITLE = {Proceedings of Graphics Interface 2002 (GI 2002)},
  EDITOR    = {McCool, Michael and St{\"u}rzlinger, Wolfgang},
  PAGES     = {89--98},
  ADDRESS   = {Calgary, Canada},
}
Endnote
%0 Conference Proceedings %A Tarini, Marco %A Yamauchi, Hitoshi %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Texturing Faces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3095-6 %F EDOC: 202190 %F OTHER: Local-ID: C125675300671F7B-EFCB1D85006F5253C1256B6E00383C7B-Tarini:TF %R 10.20380/GI2002.11 %D 2002 %B Graphics Interface 2002 %Z date of event: 2002-05-27 - 2002-05-29 %C Calgary, Canada %X We present a number of techniques to facilitate the generation of textures<br>for facial modeling. In particular, we address the generation of facial skin<br>textures from uncalibrated input photographs as well as the creation of<br>individual textures for facial components such as eyes or teeth. Apart from<br>an initial feature point selection for the skin texturing, all our methods<br>work fully automatically without any user interaction. The resulting<br>textures show a high quality and are suitable for both photo-realistic and<br>real-time facial animation. %B Proceedings of Graphics Interface 2002 %E McCool, Michael; St&#252;rzlinger, Wolfgang %P 89 - 98 %I A K Peters %@ 1-56881-183-7
Scheib, V., Haber, J., Lin, M.C., and Seidel, H.-P. 2002. Efficient Fitting and Rendering of Large Scattered Data Sets Using Subdivision Surfaces. Computer Graphics Forum (Proc. EG 02), Blackwell.
Abstract
We present a method to efficiently construct and render a smooth<br>surface for approximation of large functional scattered data. Using<br>a subdivision surface framework and techniques from terrain rendering, the<br>resulting surface can be explored from any viewpoint while<br>maintaining high surface fairness and interactive frame rates. We<br>show the approximation error to be sufficiently small for several<br>large data sets. Our system allows for adaptive simplification and<br>provides continuous levels of detail, taking into account the local<br>variation and distribution of the data.
Export
BibTeX
@inproceedings{Scheib-et-al_EG02,
  TITLE     = {Efficient Fitting and Rendering of Large Scattered Data Sets Using Subdivision Surfaces},
  AUTHOR    = {Scheib, Vincent and Haber, J{\"o}rg and Lin, Ming C. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/1467-8659.t01-1-00595},
  LOCALID   = {Local-ID: C125675300671F7B-94C0C9B44A599CA4C1256B800032AF79-Scheib:2002:EFRLSD},
  PUBLISHER = {Blackwell},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {We present a method to efficiently construct and render a smooth<br>surface for approximation of large functional scattered data. Using<br>a subdivision surface framework and techniques from terrain rendering, the<br>resulting surface can be explored from any viewpoint while<br>maintaining high surface fairness and interactive frame rates. We<br>show the approximation error to be sufficiently small for several<br>large data sets. Our system allows for adaptive simplification and<br>provides continuous levels of detail, taking into account the local<br>variation and distribution of the data.},
  BOOKTITLE = {EUROGRAPHICS 2002 (EG 02)},
  PAGES     = {353--362},
  JOURNAL   = {Computer Graphics Forum (Proc. EG)},
  VOLUME    = {21},
  ISSUE     = {3},
  ADDRESS   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Scheib, Vincent %A Haber, J&#246;rg %A Lin, Ming C. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Fitting and Rendering of Large Scattered Data Sets Using Subdivision Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F66-1 %F EDOC: 202224 %F OTHER: Local-ID: C125675300671F7B-94C0C9B44A599CA4C1256B800032AF79-Scheib:2002:EFRLSD %R 10.1111/1467-8659.t01-1-00595 %D 2002 %B EUROGRAPHICS 2002 %Z date of event: 2002-09-02 - 2002-09-06 %C Saarbr&#252;cken, Germany %X We present a method to efficiently construct and render a smooth<br>surface for approximation of large functional scattered data. Using<br>a subdivision surface framework and techniques from terrain rendering, the<br>resulting surface can be explored from any viewpoint while<br>maintaining high surface fairness and interactive frame rates. We<br>show the approximation error to be sufficiently small for several<br>large data sets. Our system allows for adaptive simplification and<br>provides continuous levels of detail, taking into account the local<br>variation and distribution of the data. %B EUROGRAPHICS 2002 %P 353 - 362 %I Blackwell %J Computer Graphics Forum %V 21 %N 3 %I Blackwell-Wiley %@ false
Scheel, A., Stamminger, M., and Seidel, H.-P. 2002. Grid Based Final Gather for Radiosity on Complex Clustered Scenes. Computer Graphics Forum (Proc. EG 02), Blackwell.
Export
BibTeX
@inproceedings{Scheel-et-al_EG02,
  title     = {Grid Based Final Gather for Radiosity on Complex Clustered Scenes},
  author    = {Scheel, Annette and Stamminger, Marc and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/1467-8659.t01-1-00705},
  localid   = {Local-ID: C125675300671F7B-7E54A478D1851285C1256C80004D41C0-Scheel2002},
  publisher = {Blackwell},
  year      = {2002},
  date      = {2002},
  booktitle = {EUROGRAPHICS 2002 (EG 02)},
  pages     = {547--556},
  journal   = {Computer Graphics Forum (Proc. EG)},
  volume    = {21},
  issue     = {3},
  address   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Scheel, Annette %A Stamminger, Marc %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Grid Based Final Gather for Radiosity on Complex Clustered Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FA3-8 %F EDOC: 202225 %F OTHER: Local-ID: C125675300671F7B-7E54A478D1851285C1256C80004D41C0-Scheel2002 %R 10.1111/1467-8659.t01-1-00705 %D 2002 %B EUROGRAPHICS 2002 %Z date of event: 2002-09-02 - 2002-09-06 %C Saarbr&#252;cken, Germany %B EUROGRAPHICS 2002 %P 547 - 556 %I Blackwell %J Computer Graphics Forum %V 21 %N 3 %I Blackwell-Wiley %@ false
Pérez Risquet, C. and Theisel, H. 2002. Image based reconstruction and interaction with 2D vector fields. Proceedings of Vision, Modeling, and Visualization VMV 2002, Akademische Verlagsgesellschaft Aka GmbH.
Abstract
This paper addresses the problem of symmetrical 2D flow visualization and presents solutions to compute a vector field from a flow image. Based on these solutions, new interaction mechanisms are developed which make visual corrections of 2D vector fields possible. These mechanisms permit an image-based interaction with some features of the vector field: vectors, streamlines, and critical points.
Export
BibTeX
@inproceedings{Theisel2001,
  title     = {Image based reconstruction and interaction with {2D} vector fields},
  author    = {P{\'e}rez Risquet, Carlos and Theisel, Holger},
  editor    = {Greiner, G{\"u}nther and Niemann, Heinrich and Ertl, Thomas and Girod, Bernd and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-89838-034-3},
  localid   = {Local-ID: C125675300671F7B-EE0E93F5D6C32977C1256C790036C22E-Theisel2001},
  publisher = {Akademische Verlagsgesellschaft Aka GmbH},
  year      = {2002},
  date      = {2002},
  abstract  = {This paper addresses the problem of symmetrical 2D flow visualization and presents solutions to compute a vector field from a flow image. Based on these solutions, new interaction mechanisms are developed which make visual corrections of 2D vector fields possible. These mechanisms permit an image-based interaction with some features of the vector field: vectors, streamlines, and critical points.},
  booktitle = {Proceedings of Vision, Modeling, and Visualization VMV 2002},
  pages     = {115--122},
  address   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A P&#233;rez Risquet, Carlos %A Theisel, Holger %E Greiner, G&#252;nther %E Niemann, Heinrich %E Ertl, Thomas %E Girod, Bernd %E Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image based reconstruction and interaction with 2D vector fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FB2-6 %F EDOC: 202226 %F OTHER: Local-ID: C125675300671F7B-EE0E93F5D6C32977C1256C790036C22E-Theisel2001 %D 2002 %B VMV 2002 %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %X This paper addresses the problem of symmetrical 2D flow visualization and presents solutions to compute a vector field from a flow image. Based on these solutions new, interactions mechanisms are developed which make visual corrections of 2D vector fields possible. These mechanisms permit an image-based interaction with some features of the vector field: vectors, streamlines, and critical points. %B Proceedings of Vision, Modeling, and Visualization VMV 2002 %P 115 - 122 %I Akademische Verlagsgesellschaft Aka GmbH %@ 3-89838-034-3
Ohtake, Y., Belyaev, A., and Seidel, H.-P. 2002. Mesh Smoothing by Adaptive and Anisotropic Gaussian Filter Applied to Mesh Normals. Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka GmbH.
Abstract
In this paper, we develop a fully automatic mesh filtering <br>method that adaptively smoothes a noisy mesh and preserves <br>sharp features and features consisting of only few triangle strips.<br>In addition, it outperforms other conventional smoothing methods<br>in terms of accuracy.
Export
BibTeX
@inproceedings{Ohtake-et-al_VMV02,
  title     = {Mesh Smoothing by Adaptive and Anisotropic Gaussian Filter Applied to Mesh Normals},
  author    = {Ohtake, Yutaka and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-89838-034-3},
  localid   = {Local-ID: C125675300671F7B-F4EFA2212965F6D2C1256CB5005D87E4-vmv02obs},
  publisher = {Akademische Verlagsgesellschaft Aka GmbH},
  year      = {2002},
  date      = {2002},
  abstract  = {In this paper, we develop a fully automatic mesh filtering <br>method that adaptively smoothes a noisy mesh and preserves <br>sharp features and features consisting of only few triangle strips.<br>In addition, it outperforms other conventional smoothing methods<br>in terms of accuracy.},
  booktitle = {Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002)},
  pages     = {203--210},
  address   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Ohtake, Yutaka %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mesh Smoothing by Adaptive and Anisotropic Gaussian Filter Applied to Mesh Normals : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FE5-5 %F EDOC: 202221 %F OTHER: Local-ID: C125675300671F7B-F4EFA2212965F6D2C1256CB5005D87E4-vmv02obs %D 2002 %B 7th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %X In this paper, we develop a fully automatic mesh filtering <br>method that adaptively smoothes a noisy mesh and preserves <br>sharp features and features consisting of only few triangle strips.<br>In addition, it outperforms other conventional smoothing methods<br>in terms of accuracy. %B Proceedings of Vision, Modeling, and Visualization 2002 %P 203 - 210 %I Akademische Verlagsgesellschaft Aka GmbH %@ 3-89838-034-3
Myszkowski, K., Tawara, T., and Seidel, H.-P. 2002. Using Animation Quality Metric to Improve Efficiency of Global Illumination Computation for Dynamic Environments. Proceedings of 7th SPIE Conference Human Vision and Electronic Imaging, SPIE - The International Society for Optical Engineering.
Export
BibTeX
@inproceedings{Myszkowski-et-al_HVEI02,
  title     = {Using Animation Quality Metric to Improve Efficiency of Global Illumination Computation for Dynamic Environments},
  author    = {Myszkowski, Karol and Tawara, Takehiro and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-8194-4402-2},
  doi       = {10.1117/12.469514},
  localid   = {Local-ID: C125675300671F7B-3C349C0FFBBA9B5FC1256C36002A89AD-MyszkowskiSpie2002},
  publisher = {SPIE -- The International Society for Optical Engineering},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of 7th SPIE Conference Human Vision and Electronic Imaging},
  editor    = {Rogowitz, Bernice and Pappas, Thrasyvoulos},
  pages     = {187--196},
  series    = {SPIE Proceedings Series},
  volume    = {4662},
  address   = {San Jose, USA},
}
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tawara, Takehiro %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Animation Quality Metric to Improve Efficiency of Global Illumination Computation for Dynamic Environments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-30B6-B %F EDOC: 202177 %F OTHER: Local-ID: C125675300671F7B-3C349C0FFBBA9B5FC1256C36002A89AD-MyszkowskiSpie2002 %R 10.1117/12.469514 %D 2002 %B 7th SPIE Conference Human Vision and Electronic Imaging %Z date of event: 2002-01-21 - 2002-01-24 %C San Jose, USA %B Proceedings of 7th SPIE Conference Human Vision and Electronic Imaging %E Rogowitz, Bernice; Pappas, Thrasyvoulos %P 187 - 196 %I SPIE - The International Society for Optical Engineering %@ 0-8194-4402-2 %B SPIE Proceedings Series %N 4662
Li, M., Schirmacher, H., Magnor, M., and Seidel, H.-P. 2002. Combining Stereo and Visual Hull Information for On-line Reconstruction and Rendering of Dynamic Scenes. Proceedings of the IEEE Multimedia Signal Processing Workshop 2002 (MMSP 2002), IEEE.
Export
BibTeX
@inproceedings{Li-et-al_MMSP02,
  title     = {Combining Stereo and Visual Hull Information for On-line Reconstruction and Rendering of Dynamic Scenes},
  author    = {Li, Ming and Schirmacher, Hartmut and Magnor, Marcus and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7803-7713-3},
  doi       = {10.1109/MMSP.2002.1203235},
  localid   = {Local-ID: C1256BDE005F57A8-C65E95DDBA259B1AC1256CA2005F93D0-Li2002:CSV},
  publisher = {IEEE},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of the IEEE Multimedia Signal Processing Workshop 2002 (MMSP 2002)},
  editor    = {Ostermann, Joern and Zhuang, Xinhua},
  pages     = {9--12},
  address   = {St. Thomas, VI, USA},
}
Endnote
%0 Conference Proceedings %A Li, Ming %A Schirmacher, Hartmut %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combining Stereo and Visual Hull Information for On-line Reconstruction and Rendering of Dynamic Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F3E-0 %F EDOC: 202235 %F OTHER: Local-ID: C1256BDE005F57A8-C65E95DDBA259B1AC1256CA2005F93D0-Li2002:CSV %R 10.1109/MMSP.2002.1203235 %D 2002 %B IEEE Multimedia Signal Processing Workshop 2002 %Z date of event: 2002-12-09 - 2002-12-11 %C St. Thomas, VI, USA %B Proceedings of the IEEE Multimedia Signal Processing Workshop 2002 %E Ostermann, Joern; Zhuang, Xinhua %P 9 - 12 %I IEEE %@ 0-7803-7713-3
Lensch, H.P.A., Goesele, M., Kautz, J., and Seidel, H.-P. 2002a. The 3D Object Pipeline - Capturing, Processing and Interactive Display of Objects with Complex Appearance. Information technology44, 6.
Abstract
This article highlights some recent results on the capture and interactive <br>display of high quality 3D models with complex appearance. For use in <br>photorealistic rendering or object recognition, a high quality model must <br>capture two things: the shape of the object represented as a geometric <br>description of its surface and the appearance of the materials it is made of, <br>e.g. the object's color, texture, or reflection properties.<br><br>The article shows how computer vision and computer graphics techniques can be <br>seamlessly integrated into a 3D object pipeline for capturing, processing, and <br>interactive display of objects with complex appearance.
Export
BibTeX
@article{Lensch-et-al_IT02,
  title     = {The {3D} Object Pipeline -- Capturing, Processing and Interactive Display of Objects with Complex Appearance},
  author    = {Lensch, Hendrik P. A. and Goesele, Michael and Kautz, Jan and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1611-2776},
  doi       = {10.1524/itit.2002.44.6.322},
  localid   = {Local-ID: C125675300671F7B-2FAABA02FCB8A978C1256C940031795B-Lensch:3OP:2002},
  publisher = {Oldenbourg Wissenschaftsverlag},
  address   = {M{\"u}nchen},
  year      = {2002},
  date      = {2002},
  abstract  = {This article highlights some recent results on the capture and interactive <br>display of high quality 3D models with complex appearance. For use in <br>photorealistic rendering or object recognition, a high quality model must <br>capture two things: the shape of the object represented as a geometric <br>description of its surface and the appearance of the materials it is made of, <br>e.g. the object's color, texture, or reflection properties.<br><br>The article shows how computer vision and computer graphics techniques can be <br>seamlessly integrated into a 3D object pipeline for capturing, processing, and <br>interactive display of objects with complex appearance.},
  journal   = {Information technology},
  volume    = {44},
  number    = {6},
  pages     = {322--330},
}
Endnote
%0 Journal Article %A Lensch, Hendrik P. A. %A Goesele, Michael %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T The 3D Object Pipeline - Capturing, Processing and Interactive Display of Objects with Complex Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3098-F %F EDOC: 202216 %F OTHER: Local-ID: C125675300671F7B-2FAABA02FCB8A978C1256C940031795B-Lensch:3OP:2002 %R 10.1524/itit.2002.44.6.322 %D 2002 %* Review method: peer-reviewed %X This article highlights some recent results on the capture and interactive <br>display of high quality 3D models with complex appearance. For use in <br>photorealistic rendering or object recognition, a high quality model must <br>capture two things: the shape of the object represented as a geometric <br>description of its surface and the appearance of the materials it is made of, <br>e.g. the object's color, texture, or reflection properties.<br><br>The article shows how computer vision and computer graphics techniques can be <br>seamlessly integrated into a 3D object pipeline for capturing, processing, and <br>interactive display of objects with complex appearance. %J Information technology %O it %V 44 %N 6 %& 322 %P 322 - 330 %I Oldenbourg Wissenschaftsverlag %C M&#252;nchen %@ false
Lensch, H.P.A., Goesele, M., and Seidel, H.-P. 2002b. Digital Collections of Real World Objects. D-Lib Magazine8, 2.
Abstract
Real world objects, such as works of art, archeological artifacts and even <br>common everyday objects, exhibit large variations in color due to the way light <br>is reflected from their surfaces. A high quality digitization method must be <br>capable of capturing these effects if the digital models generated from the <br>real objects are to look realistic.<br><br>In this article, we present an efficient method for acquiring high quality <br>models of real world objects. The resulting digital models can be viewed under <br>arbitrary viewing and lighting conditions. The efficient acquisition technique, <br>small size, high quality, and versatility of the generated models make this <br>technique well suited for large digital collections.
Export
BibTeX
@article{Lensch-et-al_DLM02,
  title     = {Digital Collections of Real World Objects},
  author    = {Lensch, Hendrik P. A. and Goesele, Michael and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1082-9873},
  doi       = {10.1045/february2002-goesele},
  localid   = {Local-ID: C125675300671F7B-785418068186F559C1256C5B003040C9-Lensch:DCR:2002},
  publisher = {Corp. for National Research Initiatives},
  address   = {Reston, Va.},
  year      = {2002},
  date      = {2002},
  abstract  = {Real world objects, such as works of art, archeological artifacts and even <br>common everyday objects, exhibit large variations in color due to the way light <br>is reflected from their surfaces. A high quality digitization method must be <br>capable of capturing these effects if the digital models generated from the <br>real objects are to look realistic.<br><br>In this article, we present an efficient method for acquiring high quality <br>models of real world objects. The resulting digital models can be viewed under <br>arbitrary viewing and lighting conditions. The efficient acquisition technique, <br>small size, high quality, and versatility of the generated models make this <br>technique well suited for large digital collections.},
  journal   = {D-Lib Magazine},
  volume    = {8},
  number    = {2},
  eid       = {3},
}
Endnote
%0 Journal Article %A Lensch, Hendrik P. A. %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Digital Collections of Real World Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F57-5 %F EDOC: 202230 %F OTHER: Local-ID: C125675300671F7B-785418068186F559C1256C5B003040C9-Lensch:DCR:2002 %R 10.1045/february2002-goesele %D 2002 %X Real world objects, such as works of art, archeological artifacts and even <br>common everyday objects, exhibit large variations in color due to the way light <br>is reflected from their surfaces. A high quality digitization method must be <br>capable of capturing these effects if the digital models generated from the <br>real objects are to look realistic.<br><br>In this article, we present an efficient method for acquiring high quality <br>models of real world objects. The resulting digital models can be viewed under <br>arbitrary viewing and lighting conditions. The efficient acquisition technique, <br>small size, high quality, and versatility of the generated models make this <br>technique well suited for large digital collections. %J D-Lib Magazine %V 8 %N 2 %Z sequence number: 3 %I Corp. for National Research Initiatives %C Reston, Va. %@ false
Lensch, H.P.A., Daubert, K., and Seidel, H.-P. 2002c. Interactive Semi-Transparent Volumetric Textures. Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka GmbH.
Abstract
Volumetric textures are often used to increase the visual<br>complexity of an object without increasing the polygon count.<br>Although it is much more efficient in terms of memory to store<br>only the volume close to the surface and to determine the<br>overall shape by a triangle mesh, rendering is much more<br>complicated compared to a single volume. We present a new<br>rendering method for volumetric textures which allows highest<br>quality at interactive rates even for semi-transparent volumes.<br>The method is based on 3D texture mapping where hundreds of<br>planes orthogonal to the viewing direction are rendered back to<br>front slicing the 3D surface volume. This way we are able to<br>correctly display semi-transparent objects and generate<br>precise silhouettes. The core problem is to calculate the<br>intersection of prisms formed by extruding the triangles of the<br>mesh along their normals and the rendering planes. We present<br>two solutions, a hybrid and a purely hardware-based approach.
Export
BibTeX
@inproceedings{Lensch-et-al_VMV02,
  title     = {Interactive Semi-Transparent Volumetric Textures},
  author    = {Lensch, Hendrik P. A. and Daubert, Katja and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-89838-034-3},
  localid   = {Local-ID: C125675300671F7B-97BC27B6AB73752AC1256C3000356B4D-Lensch2002a},
  publisher = {Akademische Verlagsgesellschaft Aka GmbH},
  year      = {2002},
  date      = {2002},
  abstract  = {Volumetric textures are often used to increase the visual<br>complexity of an object without increasing the polygon count.<br>Although it is much more efficient in terms of memory to store<br>only the volume close to the surface and to determine the<br>overall shape by a triangle mesh, rendering is much more<br>complicated compared to a single volume. We present a new<br>rendering method for volumetric textures which allows highest<br>quality at interactive rates even for semi-transparent volumes.<br>The method is based on 3D texture mapping where hundreds of<br>planes orthogonal to the viewing direction are rendered back to<br>front slicing the 3D surface volume. This way we are able to<br>correctly display semi-transparent objects and generate<br>precise silhouettes. The core problem is to calculate the<br>intersection of prisms formed by extruding the triangles of the<br>mesh along their normals and the rendering planes. We present<br>two solutions, a hybrid and a purely hardware-based approach.},
  booktitle = {Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002)},
  pages     = {505--512},
  address   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Daubert, Katja %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Semi-Transparent Volumetric Textures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FC4-D %F EDOC: 202178 %F OTHER: Local-ID: C125675300671F7B-97BC27B6AB73752AC1256C3000356B4D-Lensch2002a %D 2002 %B 7th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %X Volumetric textures are often used to increase the visual<br>complexity of an object without increasing the polygon count.<br>Although it is much more efficient in terms of memory to store<br>only the volume close to the surface and to determine the<br>overall shape by a triangle mesh, rendering is much more<br>complicated compared to a single volume. We present a new<br>rendering method for volumetric textures which allows highest<br>quality at interactive rates even for semi-transparent volumes.<br>The method is based on 3D texture mapping where hundreds of<br>planes orthogonal to the viewing direction are rendered back to<br>front slicing the 3D surface volume. This way we are able to<br>correctly display semi-transparent objects and generate<br>precise silhouettes. The core problem is to calculate the<br>intersection of prisms formed by extruding the triangles of the<br>mesh along their normals and the rendering planes. We present<br>two solutions, a hybrid and a purely hardware-based approach. %B Proceedings of Vision, Modeling, and Visualization 2002 %P 505 - 512 %I Akademische Verlagsgesellschaft Aka GmbH %@ 3-89838-034-3
Lensch, H., Gösele, M., Bekaert, P., et al. 2002d. Interactive Rendering of Translucent Objects. Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002), IEEE.
Export
BibTeX
@inproceedings{Lensch-et-al_PG02,
  title     = {Interactive Rendering of Translucent Objects},
  author    = {Lensch, Hendrik and G{\"o}sele, Michael and Bekaert, Philippe and Kautz, Jan and Magnor, Marcus and Lang, Jochen and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-1784-6},
  doi       = {10.1109/PCCGA.2002.1167862},
  localid   = {Local-ID: C1256BDE005F57A8-67A6B18A662DD086C1256CA2005DA1DC-Lensch2002:IRT},
  publisher = {IEEE},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002)},
  editor    = {Coquillart, Sabine and Shum, Heung-Yeung and Hu, Shi-Min},
  pages     = {214--224},
  address   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik %A G&#246;sele, Michael %A Bekaert, Philippe %A Kautz, Jan %A Magnor, Marcus %A Lang, Jochen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Rendering of Translucent Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FC1-4 %F EDOC: 202233 %F OTHER: Local-ID: C1256BDE005F57A8-67A6B18A662DD086C1256CA2005DA1DC-Lensch2002:IRT %R 10.1109/PCCGA.2002.1167862 %D 2002 %B 10th Pacific Conference on Computer Graphics and Applications %Z date of event: 2002-10-09 - 2002-10-11 %C Beijing, China %B Proceedings of the 10th Pacific Conference on Computer Graphics and Applications %E Coquillart, Sabine; Shum, Heung-Yeung; Hu, Shi-Min %P 214 - 224 %I IEEE %@ 0-7695-1784-6
Kautz, J., Daubert, K., and Seidel, H.-P. 2002. User-Defined Shading Models for VR Applications. OpenSG 2002 -- 1. OpenSG Symposium, OpenSG.
Export
BibTeX
@inproceedings{Kautz:2002:UDS,
  title     = {User-Defined Shading Models for {VR} Applications},
  author    = {Kautz, Jan and Daubert, Katja and Seidel, Hans-Peter},
  language  = {eng},
  localid   = {Local-ID: C125675300671F7B-00C78EED3FA67B6CC1256CF4004BD339-Kautz:2002:UDS},
  publisher = {OpenSG},
  year      = {2002},
  date      = {2002},
  booktitle = {OpenSG 2002 -- 1. OpenSG Symposium},
  pages     = {1--5},
  address   = {Darmstadt, Germany},
}
Endnote
%0 Conference Proceedings %A Kautz, Jan %A Daubert, Katja %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T User-Defined Shading Models for VR Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-30B3-2 %F EDOC: 202172 %F OTHER: Local-ID: C125675300671F7B-00C78EED3FA67B6CC1256CF4004BD339-Kautz:2002:UDS %D 2002 %B OpenSG 2002 %Z date of event: 2002-01-29 - %C Darmstadt, Germany %B OpenSG 2002 -- 1. OpenSG Symposium %P 1 - 5 %I OpenSG
Kautz, J. and Seidel, H.-P. 2002. Real-Time Halftoning. Journal of Graphics Tools7, 4.
Abstract
We present a real-time hardware accelerated method for rendering<br>objects using halftoning. It is solely based on texture mapping and<br>creates the impression of a printed image, although the lighting and the <br>objects can be changed and manipulated on-the-fly.
Export
BibTeX
@article{Kautz-Seidel_JGT02,
  title     = {Real-Time Halftoning},
  author    = {Kautz, Jan and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1086-7651},
  doi       = {10.1080/10867651.2002.10487569},
  localid   = {Local-ID: C125675300671F7B-7C978E46D991B52CC1256C85003A828D-Kautz:2003:RTH},
  publisher = {A.K. Peters},
  address   = {Wellesley, MA},
  year      = {2002},
  date      = {2002},
  abstract  = {We present a real-time hardware accelerated method for rendering<br>objects using halftoning. It is solely based on texture mapping and<br>creates the impression of a printed image, although the lighting and the <br>objects can be changed and manipulated on-the-fly.},
  journal   = {Journal of Graphics Tools},
  volume    = {7},
  number    = {4},
  pages     = {27--31},
}
Endnote
%0 Journal Article %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Halftoning : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2DE6-2 %F EDOC: 202000 %F OTHER: Local-ID: C125675300671F7B-7C978E46D991B52CC1256C85003A828D-Kautz:2003:RTH %R 10.1080/10867651.2002.10487569 %D 2002 %* Review method: peer-reviewed %X We present a real-time hardware accelerated method for rendering<br>objects using halftoning. It is solely based on texture mapping and<br>creates the impression of a printed image, although the lighting and the <br>objects can be changed and manipulated on-the-fly. %J Journal of Graphics Tools %V 7 %N 4 %& 27 %P 27 - 31 %I A.K. Peters %C Wellesley, MA %@ false
Kähler, K., Haber, J., Yamauchi, H., and Seidel, H.-P. 2002. Head Shop: Generating Animated Head Models with Anatomical Structure. Proceedings of the 2002 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, ACM SIGGRAPH.
Abstract
We present a versatile construction and deformation method for head<br>models with anatomical structure, suitable for real-time<br>physics-based facial animation. The model is equipped with landmark<br>data on skin and skull, which allows us to deform the head in<br>anthropometrically meaningful ways. On any deformed model, the<br>underlying muscle and bone structure is adapted as well, such that<br>the model remains completely animatable using the same muscle<br>contraction parameters. We employ this general technique to fit<br>a generic head model to imperfect scan data, and to simulate<br>head growth from early childhood to adult age.
Export
BibTeX
@inproceedings{Kahler-et-al_SIGGRAPH02,
  title     = {Head Shop: Generating Animated Head Models with Anatomical Structure},
  author    = {K{\"a}hler, Kolja and Haber, J{\"o}rg and Yamauchi, Hitoshi and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-58113-573-2},
  doi       = {10.1145/545261.545271},
  localid   = {Local-ID: C125675300671F7B-18F67F3ED9E63E69C1256BBA00649EEA-Kaehler:2002:HS},
  publisher = {ACM SIGGRAPH},
  year      = {2002},
  date      = {2002},
  abstract  = {We present a versatile construction and deformation method for head<br>models with anatomical structure, suitable for real-time<br>physics-based facial animation. The model is equipped with landmark<br>data on skin and skull, which allows us to deform the head in<br>anthropometrically meaningful ways. On any deformed model, the<br>underlying muscle and bone structure is adapted as well, such that<br>the model remains completely animatable using the same muscle<br>contraction parameters. We employ this general technique to fit<br>a generic head model to imperfect scan data, and to simulate<br>head growth from early childhood to adult age.},
  booktitle = {Proceedings of the 2002 ACM SIGGRAPH/Eurographics Symposium on Computer Animation},
  pages     = {55--63},
  address   = {San Antonio, USA},
}
Endnote
%0 Conference Proceedings %A K&#228;hler, Kolja %A Haber, J&#246;rg %A Yamauchi, Hitoshi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Head Shop: Generating Animated Head Models with Anatomical Structure : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FAC-5 %F EDOC: 202173 %F OTHER: Local-ID: C125675300671F7B-18F67F3ED9E63E69C1256BBA00649EEA-Kaehler:2002:HS %R 10.1145/545261.545271 %D 2002 %B 2002 ACM SIGGRAPH/Eurographics Symposium on Computer Animation %Z date of event: 2002-07-21 - 2002-07-22 %C San Antonio, USA %X We present a versatile construction and deformation method for head<br>models with anatomical structure, suitable for real-time<br>physics-based facial animation. The model is equipped with landmark<br>data on skin and skull, which allows us to deform the head in<br>anthropometrically meaningful ways. On any deformed model, the<br>underlying muscle and bone structure is adapted as well, such that<br>the model remains completely animatable using the same muscle<br>contraction parameters. We employ this general technique to fit<br>a generic head model to imperfect scan data, and to simulate<br>head growth from early childhood to adult age. %B Proceedings of the 2002 ACM SIGGRAPH/Eurographics Symposium on Computer Animation %P 55 - 63 %I ACM SIGGRAPH %@ 978-1-58113-573-2
Jeong, W.-K., Kähler, K., and Seidel, H.-P. 2002a. Subdivision Surface Simplification. Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002), IEEE.
Export
BibTeX
@inproceedings{Jeong-et-al_PG02,
  title     = {Subdivision Surface Simplification},
  author    = {Jeong, Won-Ki and K{\"a}hler, Kolja and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-1784-6},
  doi       = {10.1109/PCCGA.2002.1167907},
  localid   = {Local-ID: C125675300671F7B-646A1CC9943E8E5BC1256CB700517181-Jeong2002:SSS},
  publisher = {IEEE},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002)},
  pages     = {477--480},
  address   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Jeong, Won-Ki %A K&#228;hler, Kolja %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Subdivision Surface Simplification : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-308D-9 %F EDOC: 202174 %F OTHER: Local-ID: C125675300671F7B-646A1CC9943E8E5BC1256CB700517181-Jeong2002:SSS %R 10.1109/PCCGA.2002.1167907 %D 2002 %B 10th Pacific Conference on Computer Graphics and Applications %Z date of event: 2002-10-09 - 2002-10-11 %C Beijing, China %B Proceedings of the 10th Pacific Conference on Computer Graphics and Applications %P 477 - 480 %I IEEE %@ 0-7695-1784-6
Jeong, W.-K., Kähler, K., Haber, J., and Seidel, H.-P. 2002b. Automatic Generation of Subdivision Surface Head Models from Point Cloud Data. Proceedings of Graphics Interface 2002, A K Peters.
Abstract
An automatic procedure is presented to generate a multiresolution<br>head model from sampled surface data. A generic control mesh serves<br>as the starting point for a fitting algorithm that approximates the<br>points in an unstructured set of surface samples, e.g.~a point cloud<br>obtained directly from range scans of an individual. A hierarchical<br>representation of the model is generated by repeated refinement<br>using subdivision rules and measuring displacements to the input<br>data. Key features of our method are the fully automated<br>construction process, the ability to deal with noisy and incomplete<br>input data, and no requirement for further processing of the scan<br>data after registering the range images into a single point cloud.
Export
BibTeX
@inproceedings{Jeong-et-al_GI02,
  TITLE     = {Automatic Generation of Subdivision Surface Head Models from Point Cloud Data},
  AUTHOR    = {Jeong, Won-Ki and K{\"a}hler, Kolja and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {1-56881-183-7},
  DOI       = {10.20380/GI2002.21},
  LOCALID   = {Local-ID: C125675300671F7B-414E8B1009453694C1256B6E003895D2-Jeong:AGSSHM},
  PUBLISHER = {A K Peters},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {An automatic procedure is presented to generate a multiresolution head model from sampled surface data. A generic control mesh serves as the starting point for a fitting algorithm that approximates the points in an unstructured set of surface samples, e.g.~a point cloud obtained directly from range scans of an individual. A hierarchical representation of the model is generated by repeated refinement using subdivision rules and measuring displacements to the input data. Key features of our method are the fully automated construction process, the ability to deal with noisy and incomplete input data, and no requirement for further processing of the scan data after registering the range images into a single point cloud.},
  BOOKTITLE = {Proceedings of Graphics Interface 2002},
  EDITOR    = {McCool, Michael and St{\"u}rzlinger, Wolfgang},
  PAGES     = {181--188},
  ADDRESS   = {Calgary, Canada},
}
Endnote
%0 Conference Proceedings %A Jeong, Won-Ki %A K&#228;hler, Kolja %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Generation of Subdivision Surface Head Models from Point Cloud Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F26-4 %F EDOC: 202211 %F OTHER: Local-ID: C125675300671F7B-414E8B1009453694C1256B6E003895D2-Jeong:AGSSHM %R 10.20380/GI2002.21 %D 2002 %B Graphics Interface 2002 %Z date of event: 2002-05-27 - 2002-05-29 %C Calgary, Canada %X An automatic procedure is presented to generate a multiresolution<br>head model from sampled surface data. A generic control mesh serves<br>as the starting point for a fitting algorithm that approximates the<br>points in an unstructured set of surface samples, e.g.~a point cloud<br>obtained directly from range scans of an individual. A hierarchical<br>representation of the model is generated by repeated refinement<br>using subdivision rules and measuring displacements to the input<br>data. Key features of our method are the fully automated<br>construction process, the ability to deal with noisy and incomplete<br>input data, and no requirement for further processing of the scan<br>data after registering the range images into a single point cloud. %B Proceedings of Graphics Interface 2002 %E McCool, Michael; St&#252;rzlinger, Wolfgang %P 181 - 188 %I A K Peters %@ 1-56881-183-7
Ivrissimtzis, I. and Seidel, H.-P. 2002. Polyhedra Operators for Mesh Refinement. Proceedings of Geometric Modeling and Processing 2002 (GMP 2002), IEEE.
Export
BibTeX
@inproceedings{Ivrissimtzis-Seidel_GMP02,
  title     = {Polyhedra Operators for Mesh Refinement},
  author    = {Ivrissimtzis, Ioannis and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-1674-2},
  doi       = {10.1109/GMAP.2002.1027504},
  localid   = {Local-ID: C125675300671F7B-21B71DBAB1B06E60C1256C3D00428B44-is02a},
  publisher = {IEEE},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of Geometric Modeling and Processing 2002 (GMP 2002)},
  editor    = {Suzuki, Hiromasa and Martin, Ralph},
  pages     = {132--137},
  address   = {Wako, Japan},
}
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Polyhedra Operators for Mesh Refinement : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-302A-5 %F EDOC: 202212 %F OTHER: Local-ID: C125675300671F7B-21B71DBAB1B06E60C1256C3D00428B44-is02a %R 10.1109/GMAP.2002.1027504 %D 2002 %B Geometric Modeling and Processing 2002 %Z date of event: 2002-07-10 - 2002-07-12 %C Wako, Japan %B Proceedings of Geometric Modeling and Processing 2002 %E Suzuki, Hiromasa; Martin, Ralph %P 132 - 137 %I IEEE %@ 0-7695-1674-2
Ivrissimtzis, I., Rössl, C., and Seidel, H.-P. 2002. A Divide and Conquer Algorithm for Triangle Mesh Connectivity Encoding. Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002), IEEE.
Abstract
We propose a divide and conquer algorithm for the single resolution encoding of <br>triangle mesh connectivity. Starting from a boundary edge we grow a zig-zag <br>strip which divides the mesh into two submeshes which are encoded separately in <br>a recursive process. We introduce a novel data structure for triangle mesh <br>encoding, a binary tree with positive integer weights assigned to its nodes. <br>The length of the initial strip is stored in the root of the binary tree, while <br>the encoding of the left and right submesh are stored in the left and right <br>subtree, respectively. We find a simple criterion determining which objects of <br>this data<br>structure correspond to triangle meshes. As the algorithm implicitly traverses <br>the triangles of the mesh, it can be classified into the family of Edgebreaker <br>like encoding schemes. Hence, the compression ratios, both in the form of <br>theoretical upper bounds and practical results are similar to the <br>Edgebreaker's, while the simplicity and flexibility of the algorithm makes it <br>particularly suitable for applications where the connectivity encoding is only <br>a small part of the problem at hand.
Export
BibTeX
@inproceedings{Ivrissimtzis-et-al_PG02,
  TITLE     = {A Divide and Conquer Algorithm for Triangle Mesh Connectivity Encoding},
  AUTHOR    = {Ivrissimtzis, Ioannis and R{\"o}ssl, Christian and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1784-6},
  DOI       = {10.1109/PCCGA.2002.1167873},
  LOCALID   = {Local-ID: C125675300671F7B-76E86DD1D082CB7FC1256C550050294D-irs02a},
  PUBLISHER = {IEEE},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {We propose a divide and conquer algorithm for the single resolution encoding of triangle mesh connectivity. Starting from a boundary edge we grow a zig-zag strip which divides the mesh into two submeshes which are encoded separately in a recursive process. We introduce a novel data structure for triangle mesh encoding, a binary tree with positive integer weights assigned to its nodes. The length of the initial strip is stored in the root of the binary tree, while the encoding of the left and right submesh are stored in the left and right subtree, respectively. We find a simple criterion determining which objects of this data structure correspond to triangle meshes. As the algorithm implicitly traverses the triangles of the mesh, it can be classified into the family of Edgebreaker like encoding schemes. Hence, the compression ratios, both in the form of theoretical upper bounds and practical results are similar to the Edgebreaker's, while the simplicity and flexibility of the algorithm makes it particularly suitable for applications where the connectivity encoding is only a small part of the problem at hand.},
  BOOKTITLE = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002)},
  EDITOR    = {Coquillart, Sabine and Shum, Heung-Yeung and Hu, Shi-Min},
  PAGES     = {294--303},
  ADDRESS   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Ivrissimtzis, Ioannis %A R&#246;ssl, Christian %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Divide and Conquer Algorithm for Triangle Mesh Connectivity Encoding : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2EE3-F %F EDOC: 202206 %F OTHER: Local-ID: C125675300671F7B-76E86DD1D082CB7FC1256C550050294D-irs02a %R 10.1109/PCCGA.2002.1167873 %D 2002 %B 10th Pacific Conference on Computer Graphics and Applications %Z date of event: 2002-10-09 - 2002-10-11 %C Beijing, China %X We propose a divide and conquer algorithm for the single resolution encoding of <br>triangle mesh connectivity. Starting from a boundary edge we grow a zig-zag <br>strip which divides the mesh into two submeshes which are encoded separately in <br>a recursive process. We introduce a novel data structure for triangle mesh <br>encoding, a binary tree with positive integer weights assigned to its nodes. <br>The length of the initial strip is stored in the root of the binary tree, while <br>the encoding of the left and right submesh are stored in the left and right <br>subtree, respectively. We find a simple criterion determining which objects of <br>this data<br>structure correspond to triangle meshes. As the algorithm implicitly traverses <br>the triangles of the mesh, it can be classified into the family of Edgebreaker <br>like encoding schemes. Hence, the compression ratios, both in the form of <br>theoretical upper bounds and practical results are similar to the <br>Edgebreaker's, while the simplicity and flexibility of the algorithm makes it <br>particularly suitable for applications where the connectivity encoding is only <br>a small part of the problem at hand. 
%B Proceedings of the 10th Pacific Conference on Computer Graphics and Applications %E Coquillart, Sabine; Shum, Heung-Yeung; Hu, Shi-Min %P 294 - 303 %I IEEE %@ 0-7695-1784-6
Greiner, G., Niemann, H., Ertl, T., Girod, B., and Seidel, H.-P., eds. 2002. Proceedings of Vision, Modeling, and Visualization VMV 2002. Akademische Verlagsgesellschaft Aka GmbH.
Export
BibTeX
@proceedings{vmv2002,
  title     = {Proceedings of Vision, Modeling, and Visualization VMV 2002},
  editor    = {Greiner, G{\"u}nther and Niemann, Heinrich and Ertl, Thomas and Girod, Bernd and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {3-89838-034-3},
  localid   = {Local-ID: C125675300671F7B-5987E81F12FE5242C1256D250037127B-vmv2002},
  publisher = {Akademische Verlagsgesellschaft Aka GmbH},
  year      = {2002},
  date      = {2002},
  pages     = {533},
}
Endnote
%0 Conference Proceedings %E Greiner, G&#252;nther %E Niemann, Heinrich %E Ertl, Thomas %E Girod, Bernd %E Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Proceedings of Vision, Modeling, and Visualization VMV 2002 : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-303B-0 %F EDOC: 202192 %@ 3-89838-034-3 %F OTHER: Local-ID: C125675300671F7B-5987E81F12FE5242C1256D250037127B-vmv2002 %I Akademische Verlagsgesellschaft Aka GmbH %D 2002 %B Vision, Modeling, and Visualization VMV 2002 %Z date of event: 2002-11-20 - 2002-11-22 %D 2002 %C Erlangen, Germany %P 533
Gösele, M., Kautz, J., Lang, J., Lensch, H.P.A., and Seidel, H.-P. 2002. Tutorial notes ACM SM 02: a framework for the acquisition, processing and interactive display of high quality 3D models. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or interactive display, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the physical properties of the object. The physics of the material which an object is made of determine its appearance, e.g. the object's color, texture, deformation or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models.
Export
BibTeX
@techreport{GoeseleKautzLangLenschSeidel2002,
  TITLE       = {Tutorial notes {ACM} {SM} 02: a framework for the acquisition, processing and interactive display of high quality {3D} models},
  AUTHOR      = {G{\"o}sele, Michael and Kautz, Jan and Lang, Jochen and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2002-4-001},
  NUMBER      = {MPI-I-2002-4-001},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2002},
  DATE        = {2002},
  ABSTRACT    = {This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or interactive display, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the physical properties of the object. The physics of the material which an object is made of determine its appearance, e.g. the object's color, texture, deformation or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A G&#246;sele, Michael %A Kautz, Jan %A Lang, Jochen %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Tutorial notes ACM SM 02: a framework for the acquisition, processing and interactive display of high quality 3D models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6C86-A %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2002-4-001 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2002 %P 50 p. %X This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or interactive display, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the physical properties of the object. The physics of the material which an object is made of determine its appearance, e.g. the object's color, texture, deformation or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Goldluecke, B., Magnor, M., and Wilburn, B. 2002. Hardware-accelerated Dynamic Light Field Rendering. Proceedings Vision, Modeling and Visualization VMV 2002, Akademische Verlagsgesellschaft Aka GmbH.
Abstract
We present a system capable of interactively displaying a dynamic scene from novel viewpoints by warping and blending images recorded from multiple synchronized video cameras. It is tuned for streamed data and achieves 20~frames per second on modern consumer-class hardware when rendering a 3D~movie from an arbitrary eye point within the convex hull of the recording camera's positions. The quality of the prediction largely depends on the accuracy of the disparity maps which are reconstructed off-line and provided together with the images. We generalize known algorithms for estimating disparities between two images to the case of multiple image streams, aiming at a minimization of warping artifacts and utilization of temporal coherence.
Export
BibTeX
@inproceedings{Goldluecke2002:HAD,
  TITLE     = {Hardware-accelerated Dynamic Light Field Rendering},
  AUTHOR    = {Goldluecke, Bastian and Magnor, Marcus and Wilburn, Bennett},
  EDITOR    = {Greiner, G{\"u}nther and Niemann, Heinrich and Ertl, Thomas and Girod, Bernd and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-034-3},
  LOCALID   = {Local-ID: C1256BDE005F57A8-D9CF9CAB7328DF63C1256CAA003D3C85-Goldluecke2002:HAD},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka GmbH},
  YEAR      = {2002},
  DATE      = {2002},
  ABSTRACT  = {We present a system capable of interactively displaying a dynamic scene from novel viewpoints by warping and blending images recorded from multiple synchronized video cameras. It is tuned for streamed data and achieves 20~frames per second on modern consumer-class hardware when rendering a 3D~movie from an arbitrary eye point within the convex hull of the recording camera's positions. The quality of the prediction largely depends on the accuracy of the disparity maps which are reconstructed off-line and provided together with the images. We generalize known algorithms for estimating disparities between two images to the case of multiple image streams, aiming at a minimization of warping artifacts and utilization of temporal coherence.},
  BOOKTITLE = {Proceedings Vision, Modeling and Visualization VMV 2002},
  PAGES     = {455--462},
  ADDRESS   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Goldluecke, Bastian %A Magnor, Marcus %A Wilburn, Bennett %E Greiner, G&#252;nther %E Niemann, Heinrich %E Ertl, Thomas %E Girod, Bernd %E Seidel, Hans-Peter %+ International Max Planck Research School, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hardware-accelerated Dynamic Light Field Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FA6-2 %F EDOC: 202234 %F OTHER: Local-ID: C1256BDE005F57A8-D9CF9CAB7328DF63C1256CAA003D3C85-Goldluecke2002:HAD %D 2002 %B VMV 2002 %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %X We present a system capable of interactively displaying a dynamic scene from novel viewpoints by warping and blending images recorded from multiple synchronized video cameras. It is tuned for streamed data and achieves 20~frames per second on modern consumer-class hardware when rendering a 3D~movie from an arbitrary eye point within the convex hull of the recording camera's positions. The quality of the prediction largely depends on the accuracy of the disparity maps which are reconstructed off-line and provided together with the images. We generalize known algorithms for estimating disparities between two images to the case of multiple image streams, aiming at a minimization of warping artifacts and utilization of temporal coherence. %B Proceedings Vision, Modeling and Visualization VMV 2002 %P 455 - 462 %I aka %@ 3-89838-034-3
Goesele, M., Kautz, J., Lang, J., Lensch, H.P.A., and Seidel, H.-P. 2002a. 7th ACM Symposium on Solid Modeling and Applications; Tutorial T2: A Framework for the Acquisition, Processing and Interactive Display of High Quality 3D Models. Max-Planck-Institut für Informatik.
Export
BibTeX
@proceedings{GoeseleKautzLangLenschSeidel2003,
  TITLE     = {7th {ACM} Symposium on Solid Modeling and Applications; Tutorial T2: A Framework for the Acquisition, Processing and Interactive Display of High Quality {3D} Models},
  AUTHOR    = {Goesele, Michael and Kautz, Jan and Lang, Jochen and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-C304E5123096E9A5C1256CE900370AA8-GoeseleEtal2003},
  PUBLISHER = {Max-Planck-Institut f{\"u}r Informatik},
  YEAR      = {2002},
  DATE      = {2002},
  PAGES     = {51},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Kautz, Jan %A Lang, Jochen %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %E Goesele, Michael %E Kautz, Jan %E Lang, Jochen %E Lensch, Hendrik P. A. %E Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 7th ACM Symposium on Solid Modeling and Applications; Tutorial T2: A Framework for the Acquisition, Processing and Interactive Display of High Quality 3D Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2ED4-2 %F EDOC: 202135 %F OTHER: Local-ID: C125675300671F7B-C304E5123096E9A5C1256CE900370AA8-GoeseleEtal2003 %I Max-Planck-Institut f&#252;r Informatik %D 2002 %B 7th ACM Symposium on Solid Modeling and Applications %Z date of event: 2002-06-17 - 2002-06-21 %D 2002 %C Saarbr&#252;cken, Germany %P 51
Goesele, M., Kautz, J., Lang, J., Lensch, H.P.A., and Seidel, H.-P. 2002b. 7th ACM Symposium on Solid Modeling and Applications; Tutorial T1: Geometric Algorithms for Planning and Simulation Tasks in Virtual Prototyping. Max-Planck-Institut für Informatik.
Export
BibTeX
@proceedings{GoeseleEtal2003,
  title     = {7th ACM Symposium on Solid Modeling and Applications; Tutorial T1: Geometric Algorithms for Planning and Simulation Tasks in Virtual Prototyping},
  author    = {Goesele, Michael and Kautz, Jan and Lang, Jochen and Lensch, Hendrik P. A. and Seidel, Hans-Peter},
  editor    = {Sch{\"o}mer, Elmar and Reichel, Joachim and Warken, Thomas and Lennerz, Christian},
  language  = {eng},
  localid   = {Local-ID: C1256428004B93B8-E5DA0A4B66C15276C1256D9E00539F80-GoeseleEtal2003},
  publisher = {Max-Planck-Institut f{\"u}r Informatik},
  year      = {2002},
  date      = {2002},
  pages     = {175},
  address   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Kautz, Jan %A Lang, Jochen %A Lensch, Hendrik P. A. %A Seidel, Hans-Peter %E Sch&#246;mer, Elmar %E Reichel, Joachim %E Warken, Thomas %E Lennerz, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Discrete Optimization, MPI for Informatics, Max Planck Society %T 7th ACM Symposium on Solid Modeling and Applications; Tutorial T1: Geometric Algorithms for Planning and Simulation Tasks in Virtual Prototyping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2ED2-6 %F EDOC: 202086 %F OTHER: Local-ID: C1256428004B93B8-E5DA0A4B66C15276C1256D9E00539F80-GoeseleEtal2003 %I Max-Planck-Institut f&#252;r Informatik %D 2002 %B 7th ACM Symposium on Solid Modeling and Applications %Z date of event: 2002-06-17 - 2002-06-21 %D 2002 %C Saarbr&#252;cken, Germany %P 175
Drettakis, G. and Seidel, H.-P., eds. 2002. EUROGRAPHICS 2002. Blackwell.
Export
BibTeX
@proceedings{eg2002,
  title     = {EUROGRAPHICS 2002},
  editor    = {Drettakis, George and Seidel, Hans-Peter},
  language  = {eng},
  localid   = {Local-ID: C125675300671F7B-95A0E4D602973CB1C1256D250037F11B-eg2002},
  publisher = {Blackwell},
  year      = {2002},
  date      = {2002},
  pages     = {656},
}
Endnote
%0 Conference Proceedings %E Drettakis, George %E Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EUROGRAPHICS 2002 : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F6F-F %F EDOC: 202129 %F OTHER: Local-ID: C125675300671F7B-95A0E4D602973CB1C1256D250037F11B-eg2002 %I Blackwell %D 2002 %B EUROGRAPHICS 2002 %Z date of event: 2002-09-02 - 2002-09-06 %D 2002 %C Saarbr&#252;cken, Germany %P 656
Drago, F., Martens, W., Myszkowski, K., and Seidel, H.-P. 2002. Perceptual evaluation of tone mapping operators with regard to similarity and preference. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Seven tone mapping methods currently available to display high dynamic range images were submitted to perceptual evaluation in order to find the attributes most predictive of the success of a robust all-around tone mapping algorithm. The two most salient Stimulus Space dimensions underlying the perception of a set of images produced by six of the tone mappings were revealed using INdividual Differences SCALing (INDSCAL) analysis; and an ideal preference point within the INDSCAL-derived Stimulus Space was determined for a group of 11 observers using PREFerence MAPping (PREFMAP) analysis. Interpretation of the INDSCAL results was aided by pairwise comparisons of images that led to an ordering of the images according to which were more or less natural looking.
Export
BibTeX
@techreport{DragoMartensMyszkowskiSeidel2002,
  TITLE       = {Perceptual evaluation of tone mapping operators with regard to similarity and preference},
  AUTHOR      = {Drago, Frederic and Martens, William and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2002-4-002},
  NUMBER      = {MPI-I-2002-4-002},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2002},
  DATE        = {2002},
  ABSTRACT    = {Seven tone mapping methods currently available to display high dynamic range images were submitted to perceptual evaluation in order to find the attributes most predictive of the success of a robust all-around tone mapping algorithm. The two most salient Stimulus Space dimensions underlying the perception of a set of images produced by six of the tone mappings were revealed using INdividual Differences SCALing (INDSCAL) analysis; and an ideal preference point within the INDSCAL-derived Stimulus Space was determined for a group of 11 observers using PREFerence MAPping (PREFMAP) analysis. Interpretation of the INDSCAL results was aided by pairwise comparisons of images that led to an ordering of the images according to which were more or less natural looking.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Drago, Frederic %A Martens, William %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual evaluation of tone mapping operators with regard to similarity and preference : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6C83-0 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2002-4-002 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2002 %P 30 p. %X Seven tone mapping methods currently available to display high dynamic range images were submitted to perceptual evaluation in order to find the attributes most predictive of the success of a robust all-around tone mapping algorithm. The two most salient Stimulus Space dimensions underlying the perception of a set of images produced by six of the tone mappings were revealed using INdividual Differences SCALing (INDSCAL) analysis; and an ideal preference point within the INDSCAL-derived Stimulus Space was determined for a group of 11 observers using PREFerence MAPping (PREFMAP) analysis. Interpretation of the INDSCAL results was aided by pairwise comparisons of images that led to an ordering of the images according to which were more or less natural looking. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Dmitriev, K., Brabec, S., Myszkowski, K., and Seidel, H.-P. 2002. Interactive Global Illumination Using Selective Photon Tracing. Proceedings of the 13th Eurographics Workshop on Rendering, Eurographics Association.
Export
BibTeX
@inproceedings{Dmitriev-et-al_Eurographics02,
  title     = {Interactive Global Illumination Using Selective Photon Tracing},
  author    = {Dmitriev, Kirill and Brabec, Stefan and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-58113-534-3},
  doi       = {10.2312/EGWR/EGWR02/025-036},
  localid   = {Local-ID: C125675300671F7B-5D4014450BF5525BC1256C360028CE0D-Dmitriev2002},
  publisher = {Eurographics Association},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of the 13th Eurographics Workshop on Rendering},
  pages     = {25--36},
  address   = {Pisa, Italy},
}
Endnote
%0 Conference Proceedings %A Dmitriev, Kirill %A Brabec, Stefan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Global Illumination Using Selective Photon Tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FBC-1 %F EDOC: 202130 %F OTHER: Local-ID: C125675300671F7B-5D4014450BF5525BC1256C360028CE0D-Dmitriev2002 %R 10.2312/EGWR/EGWR02/025-036 %D 2002 %B 13th Eurographics Workshop on Rendering %Z date of event: 2002-06-26 - 2002-06-28 %C Pisa, Italy %B Proceedings of the 13th Eurographics Workshop on Rendering %P 25 - 36 %I Eurographics Association %@ 978-1-58113-534-3
Daubert, K. and Seidel, H.-P. 2002. Hardware-based Volumetric Knit-wear. Computer Graphics Forum (Proc. EG 02), Blackwell.
Abstract
We present a hardware-based, volumetric approach for rendering<br>knit wear at very interactive rates. A single stitch is<br>represented by a volumetric texture with each voxel storing the<br>main direction of the strands of yarn inside it. We render the<br>knit wear in layers using an approximation of the Banks model.<br>Our hardware implementation allows specular and diffuse material<br>properties to change from one voxel to the next. This enables us<br>to represent yarn made up of different components or render<br>garments with complicated color patterns. Furthermore, our<br>approach can handle self-shadowing of the stitches, and can<br>easily be adapted to also include view-independent scattering.<br>The resulting shader lends itself naturally to mip-mapping, and<br>requires no reordering of the base geometry, making it<br>versatile and easy to use.
Export
BibTeX
@inproceedings{Daubert2002b,
  title     = {Hardware-based Volumetric Knit-wear},
  author    = {Daubert, Katja and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/1467-8659.t01-1-00708},
  localid   = {Local-ID: C125675300671F7B-C18FC404E3942D37C1256C3000348210-Daubert2002b},
  publisher = {Blackwell},
  year      = {2002},
  date      = {2002},
  abstract  = {We present a hardware-based, volumetric approach for rendering knit wear at very interactive rates. A single stitch is represented by a volumetric texture with each voxel storing the main direction of the strands of yarn inside it. We render the knit wear in layers using an approximation of the Banks model. Our hardware implementation allows specular and diffuse material properties to change from one voxel to the next. This enables us to represent yarn made up of different components or render garments with complicated color patterns. Furthermore, our approach can handle self-shadowing of the stitches, and can easily be adapted to also include view-independent scattering. The resulting shader lends itself naturally to mip-mapping, and requires no reordering of the base geometry, making it versatile and easy to use.},
  booktitle = {EUROGRAPHICS 2002 (EG 02)},
  pages     = {575--583},
  journal   = {Computer Graphics Forum (Proc. EG)},
  volume    = {21},
  issue     = {3},
  address   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Daubert, Katja %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hardware-based Volumetric Knit-wear : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FA9-B %F EDOC: 202131 %F OTHER: Local-ID: C125675300671F7B-C18FC404E3942D37C1256C3000348210-Daubert2002b %R 10.1111/1467-8659.t01-1-00708 %D 2002 %B EUROGRAPHICS 2002 %Z date of event: 2002-09-02 - 2002-09-06 %C Saarbr&#252;cken, Germany %X We present a hardware-based, volumetric approach for rendering<br>knit wear at very interactive rates. A single stitch is<br>represented by a volumetric texture with each voxel storing the<br>main direction of the strands of yarn inside it. We render the<br>knit wear in layers using an approximation of the Banks model.<br>Our hardware implementation allows specular and diffuse material<br>properties to change from one voxel to the next. This enables us<br>to represent yarn made up of different components or render<br>garments with complicated color patterns. Furthermore, our<br>approach can handle self-shadowing of the stitches, and can<br>easily be adapted to also include view-independent scattering.<br>The resulting shader lends itself naturally to mip-mapping, and<br>requires no reordering of the base geometry, making it<br>versatile and easy to use. %B EUROGRAPHICS 2002 %P 575 - 583 %I Blackwell %J Computer Graphics Forum %V 21 %N 3 %I Blackwell-Wiley %@ false
Choi, S.W. and Seidel, H.-P. 2002a. Linear One-sided Stability of MAT for Weakly Injective 3D Domain. Proceedings of the Seventh ACM Symposium on Solid Modeling and Applications (SMA 2002), ACM.
Export
BibTeX
@inproceedings{Choi-Seidel_SMA02,
  title     = {Linear One-sided Stability of {MAT} for Weakly Injective {3D} Domain},
  author    = {Choi, Sung Woo and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-58113-506-0},
  doi       = {10.1145/566282.566332},
  localid   = {Local-ID: C125675300671F7B-6944ADD8EC237337C1256B74005178E7-ChoiSeidelSM2002},
  publisher = {ACM},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of the Seventh ACM Symposium on Solid Modeling and Applications (SMA 2002)},
  pages     = {344--355},
  address   = {Saarbr{\"u}cken, Germany},
}
Endnote
%0 Conference Proceedings %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Linear One-sided Stability of MAT for Weakly Injective 3D Domain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FCB-0 %F EDOC: 202124 %F OTHER: Local-ID: C125675300671F7B-6944ADD8EC237337C1256B74005178E7-ChoiSeidelSM2002 %R 10.1145/566282.566332 %D 2002 %B Seventh ACM Symposium on Solid Modeling and Applications %Z date of event: 2002-06-17 - 2002-06-21 %C Saarbr&#252;cken, Germany %B Proceedings of the Seventh ACM Symposium on Solid Modeling and Applications %P 344 - 355 %I ACM %@ 978-1-58113-506-0
Choi, S.W. and Seidel, H.-P. 2002b. Linear One-sided Stability of MAT for Weakly Injective Domain. Journal of Mathematical Imaging and Vision 17.
Export
BibTeX
@article{Choi-Seidel_JMIV02,
  title     = {Linear One-sided Stability of {MAT} for Weakly Injective Domain},
  author    = {Choi, Sung Woo and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0924-9907},
  doi       = {10.1023/A:1020763509700},
  localid   = {Local-ID: C125675300671F7B-FA2F09C679B3D93EC1256B740050EDFB-ChoiSeidelJMIV2002},
  publisher = {Kluwer Academic Publishers},
  address   = {Dordrecht, Holland},
  year      = {2002},
  date      = {2002},
  journal   = {Journal of Mathematical Imaging and Vision},
  volume    = {17},
  pages     = {237--247},
}
Endnote
%0 Journal Article %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Linear One-sided Stability of MAT for Weakly Injective Domain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FCE-A %F EDOC: 202157 %F OTHER: Local-ID: C125675300671F7B-FA2F09C679B3D93EC1256B740050EDFB-ChoiSeidelJMIV2002 %R 10.1023/A:1020763509700 %D 2002 %* Review method: peer-reviewed %J Journal of Mathematical Imaging and Vision %V 17 %& 237 %P 237 - 247 %I Kluwer Academic Publishers %C Dordrecht, Holland %@ false %U https://rdcu.be/dy1as
Brabec, S., Annen, T., and Seidel, H.-P. 2002a. Shadow Mapping for Hemispherical and Omnidirectional Light Sources. Advances in Modelling, Animation and Rendering (Proceedings Computer Graphics International 2002), Springer.
Export
BibTeX
@inproceedings{Brabec2002:HemiOmni,
  title     = {Shadow Mapping for Hemispherical and Omnidirectional Light Sources},
  author    = {Brabec, Stefan and Annen, Thomas and Seidel, Hans-Peter},
  editor    = {Vince, John and Earnshaw, Rae},
  language  = {eng},
  isbn      = {1-85233-654-4},
  localid   = {Local-ID: C125675300671F7B-D46A3434AAD63DD3C1256CB70051705B-Brabec2002:HemiOmni},
  publisher = {Springer},
  year      = {2002},
  date      = {2002},
  booktitle = {Advances in Modelling, Animation and Rendering (Proceedings Computer Graphics International 2002)},
  pages     = {397--408},
  address   = {Bradford, UK},
}
Endnote
%0 Conference Proceedings %A Brabec, Stefan %A Annen, Thomas %A Seidel, Hans-Peter %E Vince, John %E Earnshaw, Rae %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shadow Mapping for Hemispherical and Omnidirectional Light Sources : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3072-3 %F EDOC: 202152 %F OTHER: Local-ID: C125675300671F7B-D46A3434AAD63DD3C1256CB70051705B-Brabec2002:HemiOmni %D 2002 %B Proceedings Computer Graphics International %Z date of event: 2002-07-01 - %C Bradford, UK %B Advances in Modelling, Animation and Rendering (Proceedings Computer Graphics International 2002) %P 397 - 408 %I Springer %@ 1-85233-654-4
Brabec, S. and Seidel, H.-P. 2002. Single Sample Soft Shadows Using Depth Maps. Proceedings of Graphics Interface 2002 (GI 2002), A K Peters.
Abstract
In this paper we propose a new method for rendering soft shadows at interactive <br>frame rates. Although the algorithm only uses information obtained from a <br>single light source sample, it is capable of producing subjectively realistic <br>penumbra regions. We do not claim that the proposed method is physically <br>correct but rather that it is aesthetically correct. Since the algorithm <br>operates on sampled representations of the scene, the shadow computation does <br>not directly depend on the scene complexity. Having only a single depth and <br>object ID map representing the pixels seen by the light source, we can <br>approximate penumbrae by searching the neighborhood of pixels warped from the <br>camera view for relevant blocker information.<br><br>We explain the basic technique in detail, showing how simple observations can <br>yield satisfying results. We also address sampling issues relevant to the <br>quality of the computed shadows, as well as speed-up techniques that are able <br>to bring the performance up to interactive frame rates.
Export
BibTeX
@inproceedings{Brabec-Seidel_GI02,
  title     = {Single Sample Soft Shadows Using Depth Maps},
  author    = {Brabec, Stefan and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {1-56881-183-7},
  doi       = {10.20380/GI2002.25},
  localid   = {Local-ID: C125675300671F7B-0738FE02D2E46A7DC1256C9F0074CF17-Brabec:SSS:2002},
  publisher = {A K Peters},
  year      = {2002},
  date      = {2002},
  abstract  = {In this paper we propose a new method for rendering soft shadows at interactive frame rates. Although the algorithm only uses information obtained from a single light source sample, it is capable of producing subjectively realistic penumbra regions. We do not claim that the proposed method is physically correct but rather that it is aesthetically correct. Since the algorithm operates on sampled representations of the scene, the shadow computation does not directly depend on the scene complexity. Having only a single depth and object ID map representing the pixels seen by the light source, we can approximate penumbrae by searching the neighborhood of pixels warped from the camera view for relevant blocker information. We explain the basic technique in detail, showing how simple observations can yield satisfying results. We also address sampling issues relevant to the quality of the computed shadows, as well as speed-up techniques that are able to bring the performance up to interactive frame rates.},
  booktitle = {Proceedings of Graphics Interface 2002 (GI 2002)},
  editor    = {McCool, Michael and St{\"u}rzlinger, Wolfgang},
  pages     = {219--228},
  address   = {Calgary, Canada},
}
Endnote
%0 Conference Proceedings %A Brabec, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Single Sample Soft Shadows Using Depth Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-307E-B %F EDOC: 202164 %F OTHER: Local-ID: C125675300671F7B-0738FE02D2E46A7DC1256C9F0074CF17-Brabec:SSS:2002 %R 10.20380/GI2002.25 %D 2002 %B Graphics Interface 2002 %Z date of event: 2002-05-27 - 2002-05-29 %C Calgary, Canada %X In this paper we propose a new method for rendering soft shadows at interactive <br>frame rates. Although the algorithm only uses information obtained from a <br>single light source sample, it is capable of producing subjectively realistic <br>penumbra regions. We do not claim that the proposed method is physically <br>correct but rather that it is aesthetically correct. Since the algorithm <br>operates on sampled representations of the scene, the shadow computation does <br>not directly depend on the scene complexity. Having only a single depth and <br>object ID map representing the pixels seen by the light source, we can <br>approximate penumbrae by searching the neighborhood of pixels warped from the <br>camera view for relevant blocker information.<br><br>We explain the basic technique in detail, showing how simple observations can <br>yield satisfying results. We also address sampling issues relevant to the <br>quality of the computed shadows, as well as speed-up techniques that are able <br>to bring the performance up to interactive frame rates. %B Proceedings of Graphics Interface 2002 %E McCool, Michael; St&#252;rzlinger, Wolfgang %P 219 - 228 %I A K Peters %@ 1-56881-183-7
Brabec, S., Annen, T., and Seidel, H.-P. 2002b. Practical Shadow Mapping. Journal of Graphics Tools 7, 4.
Abstract
In this paper we present several methods that can greatly improve <br>image quality when using the shadow mapping algorithm.<br>Shadow artifacts introduced by shadow mapping<br>are mainly due to low resolution shadow maps and/or the limited<br>numerical precision used when performing the shadow test.<br>These problems especially arise when the light source's viewing<br>frustum, from which the shadow map is generated, is not adjusted <br>to the actual camera view.<br>We show how a tight fitting frustum can be computed such that <br>the shadow mapping algorithm concentrates on the visible parts of the<br>scene and takes advantage of nearly the full available precision. <br>Furthermore, we recommend uniformly spaced depth values <br>in contrast to perspectively spaced depths in order to equally sample<br>the scene seen from the light source.
Export
BibTeX
@article{Brabec-et-al_JGT02,
  title     = {Practical Shadow Mapping},
  author    = {Brabec, Stefan and Annen, Thomas and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1086-7651},
  doi       = {10.1080/10867651.2002.10487567},
  localid   = {Local-ID: C125675300671F7B-76B84C6EEF28D39BC1256CF000516733-Brabec2003:PSM},
  publisher = {A.K. Peters},
  address   = {Wellesley, MA},
  year      = {2002},
  date      = {2002},
  abstract  = {In this paper we present several methods that can greatly improve image quality when using the shadow mapping algorithm. Shadow artifacts introduced by shadow mapping are mainly due to low resolution shadow maps and/or the limited numerical precision used when performing the shadow test. These problems especially arise when the light source's viewing frustum, from which the shadow map is generated, is not adjusted to the actual camera view. We show how a tight fitting frustum can be computed such that the shadow mapping algorithm concentrates on the visible parts of the scene and takes advantage of nearly the full available precision. Furthermore, we recommend uniformly spaced depth values in contrast to perspectively spaced depths in order to equally sample the scene seen from the light source.},
  journal   = {Journal of Graphics Tools},
  volume    = {7},
  number    = {4},
  pages     = {9--18},
}
Endnote
%0 Journal Article %A Brabec, Stefan %A Annen, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Practical Shadow Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2DC6-A %F EDOC: 202041 %F OTHER: Local-ID: C125675300671F7B-76B84C6EEF28D39BC1256CF000516733-Brabec2003:PSM %R 10.1080/10867651.2002.10487567 %D 2002 %* Review method: peer-reviewed %X In this paper we present several methods that can greatly improve <br>image quality when using the shadow mapping algorithm.<br>Shadow artifacts introduced by shadow mapping<br>are mainly due to low resolution shadow maps and/or the limited<br>numerical precision used when performing the shadow test.<br>These problems especially arise when the light source's viewing<br>frustum, from which the shadow map is generated, is not adjusted <br>to the actual camera view.<br>We show how a tight fitting frustum can be computed such that <br>the shadow mapping algorithm concentrates on the visible parts of the<br>scene and takes advantage of nearly the full available precision. <br>Furthermore, we recommend uniformly spaced depth values <br>in contrast to perspectively spaced depths in order to equally sample<br>the scene seen from the light source. %J Journal of Graphics Tools %V 7 %N 4 %& 9 %P 9 - 18 %I A.K. Peters %C Wellesley, MA %@ false
Anoshkina, E., Belyaev, A., and Seidel, H.-P. 2002. Asymptotic Analysis of Three-Point Approximations of Vertex Normals and Curvatures. Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka GmbH.
Export
BibTeX
@inproceedings{Anoshkina-et-al_VMV02,
  title     = {Asymptotic Analysis of Three-Point Approximations of Vertex Normals and Curvatures},
  author    = {Anoshkina, Elena and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  localid   = {Local-ID: C125675300671F7B-FC2F43207EB62F19C1256CB70068E5EA-vmv02abs},
  publisher = {Akademische Verlagsgesellschaft Aka GmbH},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002)},
  pages     = {211--216},
  address   = {Erlangen, Germany},
}
Endnote
%0 Conference Proceedings %A Anoshkina, Elena %A Belyaev, Alexander %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Asymptotic Analysis of Three-Point Approximations of Vertex Normals and Curvatures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F10-3 %F EDOC: 202154 %F OTHER: Local-ID: C125675300671F7B-FC2F43207EB62F19C1256CB70068E5EA-vmv02abs %D 2002 %B 7th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %B Proceedings of Vision, Modeling, and Visualization 2002 %P 211 - 216 %I Akademische Verlagsgesellschaft Aka GmbH
Albrecht, I., Haber, J., Kähler, K., Schröder, M., and Seidel, H.-P. 2002a. May I talk to you? :-) - Facial Animation from Text. Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002), IEEE explore.
Export
BibTeX
@inproceedings{DBLP:conf/pg/AlbrechtHKSS02,
  title     = {May I talk to you? {:-)} -- Facial Animation from Text},
  author    = {Albrecht, Irene and Haber, J{\"o}rg and K{\"a}hler, Kolja and Schr{\"o}der, Marc and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {0-7695-1784-6},
  doi       = {10.1109/PCCGA.2002.1167841},
  publisher = {IEEE},
  year      = {2002},
  date      = {2002},
  booktitle = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (PG 2002)},
  editor    = {Coquillart, Sabine and Shum, Heung-Yeung and Hu, Shi-Min},
  pages     = {77--86},
  address   = {Beijing, China},
}
Endnote
%0 Conference Proceedings %A Albrecht, Irene %A Haber, J&#246;rg %A K&#228;hler, Kolja %A Schr&#246;der, Marc %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T May I talk to you? : -) - Facial Animation from Text : %G eng %U http://hdl.handle.net/21.11116/0000-000E-6EB1-F %R 10.1109/PCCGA.2002.1167841 %D 2002 %B 10th Pacific Conference on Computer Graphics and Applications %Z date of event: 2002-10-09 - 2002-10-11 %C Beijing, China %B Proceedings of the 10th Pacific Conference on Computer Graphics and Applications %E Coquillart, Sabine; Shum, Heung-Yeung; Hu, Shi-Min %P 77 - 86 %I IEEE explore %@ 0-7695-1784-6
Albrecht, I., Haber, J., and Seidel, H.-P. 2002b. Automatic Generation of Non-Verbal Facial Expressions from Speech. Advances in Modelling, Animation and Rendering (Proceedings Computer Graphics International 2002), Springer.
Abstract
Speech synchronized facial animation that controls only the movement of the mouth is typically perceived as wooden and unnatural. We propose a method to generate additional facial expressions such as movement of the head, the eyes, and the eyebrows fully automatically from the input speech signal. This is achieved by extracting prosodic parameters such as pitch flow and power spectrum from the speech signal and using them to control facial animation parameters in accordance to results from paralinguistic research.
Export
BibTeX
@inproceedings{Albrecht:AGNVFES,
  title     = {Automatic Generation of Non-Verbal Facial Expressions from Speech},
  author    = {Albrecht, Irene and Haber, J{\"o}rg and Seidel, Hans-Peter},
  editor    = {Vince, John and Earnshaw, Rae},
  language  = {eng},
  isbn      = {1-85233-654-4},
  localid   = {Local-ID: C125675300671F7B-AAA0D39542763367C1256B6E00379ECA-Albrecht:AGNVFES},
  publisher = {Springer},
  year      = {2002},
  date      = {2002},
  abstract  = {Speech synchronized facial animation that controls only the movement of the mouth is typically perceived as wooden and unnatural. We propose a method to generate additional facial expressions such as movement of the head, the eyes, and the eyebrows fully automatically from the input speech signal. This is achieved by extracting prosodic parameters such as pitch flow and power spectrum from the speech signal and using them to control facial animation parameters in accordance to results from paralinguistic research.},
  booktitle = {Advances in Modelling, Animation and Rendering (Proceedings Computer Graphics International 2002)},
  pages     = {283--293},
  address   = {Bradford, UK},
}
Endnote
%0 Conference Proceedings %A Albrecht, Irene %A Haber, J&#246;rg %A Seidel, Hans-Peter %E Vince, John %E Earnshaw, Rae %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Generation of Non-Verbal Facial Expressions from Speech : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F23-A %F EDOC: 202155 %F OTHER: Local-ID: C125675300671F7B-AAA0D39542763367C1256B6E00379ECA-Albrecht:AGNVFES %D 2002 %B CGI 2002 %Z date of event: 2002-07-01 - 2002-07-05 %C Bradford, UK %X Speech synchronized facial animation that controls only the movement of the mouth is typically perceived as wooden and unnatural. We propose a method to generate additional facial expressions such as movement of the head, the eyes, and the eyebrows fully automatically from the input speech signal. This is achieved by extracting prosodic parameters such as pitch flow and power spectrum from the speech signal and using them to control facial animation parameters in accordance to results from paralinguistic research. %B Advances in Modelling, Animation and Rendering (Proceedings Computer Graphics International 2002) %P 283 - 293 %I Springer %@ 1-85233-654-4
Albrecht, I., Haber, J., and Seidel, H.-P. 2002c. Speech Synchronization for Physics-based Facial Animation. Proceedings of the 10th International Conferences in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2002), UNION Agency.
Abstract
We present a method for generating realistic speech-synchronized <br>facial animations using a physics-based approach and support for <br>coarticulation, i.e.\ the coloring of a speech segment by <br>surrounding segments. We have implemented several extensions to <br>the original coarticulation algorithm of Cohen and Massaro. The <br>enhancements include an optimization to improve performance as <br>well as special treatment of closure and release phase of <br>bilabial stops and other phonemes. Furthermore, for phonemes <br>that are shorter than the sampling intervals of the algorithm <br>and might therefore be missed, additional key frames are created <br>to ensure their impact onto the animation.
Export
BibTeX
@inproceedings{Albrecht-et-al_WSCG02,
  title     = {Speech Synchronization for Physics-based Facial Animation},
  author    = {Albrecht, Irene and Haber, J{\"o}rg and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1213-6972},
  localid   = {Local-ID: C125675300671F7B-6E16111571B3F920C1256B410044CF71-Albrecht:2002:SSPFA},
  publisher = {UNION Agency},
  year      = {2002},
  date      = {2002},
  abstract  = {We present a method for generating realistic speech-synchronized facial animations using a physics-based approach and support for coarticulation, i.e.\ the coloring of a speech segment by surrounding segments. We have implemented several extensions to the original coarticulation algorithm of Cohen and Massaro. The enhancements include an optimization to improve performance as well as special treatment of closure and release phase of bilabial stops and other phonemes. Furthermore, for phonemes that are shorter than the sampling intervals of the algorithm and might therefore be missed, additional key frames are created to ensure their impact onto the animation.},
  booktitle = {Proceedings of the 10th International Conferences in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2002)},
  editor    = {Skala, V{\'a}clav},
  pages     = {9--16},
  address   = {Plzen, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Albrecht, Irene %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Speech Synchronization for Physics-based Facial Animation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3088-4 %F EDOC: 202148 %F OTHER: Local-ID: C125675300671F7B-6E16111571B3F920C1256B410044CF71-Albrecht:2002:SSPFA %D 2002 %B 10th International Conferences in Central Europe on Computer Graphics, Visualization and Computer Vision %Z date of event: 2002-02-04 - 2002-02-08 %C Plzen, Czech Republic %X We present a method for generating realistic speech-synchronized <br>facial animations using a physics-based approach and support for <br>coarticulation, i.e.\ the coloring of a speech segment by <br>surrounding segments. We have implemented several extensions to <br>the original coarticulation algorithm of Cohen and Massaro. The <br>enhancements include an optimization to improve performance as <br>well as special treatment of closure and release phase of <br>bilabial stops and other phonemes. Furthermore, for phonemes <br>that are shorter than the sampling intervals of the algorithm <br>and might therefore be missed, additional key frames are created <br>to ensure their impact onto the animation. %B Proceedings of the 10th International Conferences in Central Europe on Computer Graphics, Visualization and Computer Vision %E Skala, V&#225;clav %P 9 - 16 %I UNION Agency %@ false %U http://wscg.zcu.cz/wscg2002/Papers_2002/D83.pdf
2001
Wesche, G. and Seidel, H.-P. 2001. FreeDrawer: A Free-form Sketching System on the Responsive Workbench. VRST ’01, ACM Symposium on Virtual Reality Software and Technology, ACM.
Abstract
A sketching system for spline-based free-form surfaces on the Responsive <br>Workbench is presented. We propose 3D tools for curve drawing and deformation <br>techniques for curves and surfaces, adapted to the needs of designers. The user <br>directly draws curves in the virtual environment, using a tracked stylus as an <br>input device. A curve network can be formed, describing the skeleton of a <br>virtual model. The non-dominant hand positions and orients the model while the <br>dominant hand uses the editing tools. The curves and the resulting skinning <br>surfaces can interactively be deformed.
Export
BibTeX
@inproceedings{Wesche-Seidel_VRST01,
  title     = {{FreeDrawer}: A Free-form Sketching System on the Responsive Workbench},
  author    = {Wesche, Gerold and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-58113-427-8},
  doi       = {10.1145/505008.505041},
  localid   = {Local-ID: C125675300671F7B-1341FF7EDB30390BC1256AB500583090-Wesche:2001:FFF},
  publisher = {ACM},
  year      = {2001},
  date      = {2001},
  abstract  = {A sketching system for spline-based free-form surfaces on the Responsive Workbench is presented. We propose 3D tools for curve drawing and deformation techniques for curves and surfaces, adapted to the needs of designers. The user directly draws curves in the virtual environment, using a tracked stylus as an input device. A curve network can be formed, describing the skeleton of a virtual model. The non-dominant hand positions and orients the model while the dominant hand uses the editing tools. The curves and the resulting skinning surfaces can interactively be deformed.},
  booktitle = {VRST '01, ACM Symposium on Virtual Reality Software and Technology},
  pages     = {167--174},
  address   = {Banff, Canada},
}
Endnote
%0 Conference Proceedings %A Wesche, Gerold %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T FreeDrawer: A Free-form Sketching System on the Responsive Workbench : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-312D-8 %F EDOC: 202200 %F OTHER: Local-ID: C125675300671F7B-1341FF7EDB30390BC1256AB500583090-Wesche:2001:FFF %R 10.1145/505008.505041 %D 2001 %B ACM Symposium on Virtual Reality Software and Technology %Z date of event: 2001-11-15 - 2001-11-17 %C Banff, Canada %X A sketching system for spline-based free-form surfaces on the Responsive <br>Workbench is presented. We propose 3D tools for curve drawing and deformation <br>techniques for curves and surfaces, adapted to the needs of designers. The user <br>directly draws curves in the virtual environment, using a tracked stylus as an <br>input device. A curve network can be formed, describing the skeleton of a <br>virtual model. The non-dominant hand positions and orients the model while the <br>dominant hand uses the editing tools. The curves and the resulting skinning <br>surfaces can interactively be deformed. %B VRST '01 %P 167 - 174 %I ACM %@ 978-1-58113-427-8
Vorsatz, J. and Seidel, H.-P. 2001. Multiresolution Modeling and Interactive Deformation of Large 3D Meshes. Deformable Avatars, Springer.
Export
BibTeX
@inproceedings{Vorsatz-Seidel_DEFORM-AVATARS-2000,
  title     = {Multiresolution Modeling and Interactive Deformation of Large {3D} Meshes},
  author    = {Vorsatz, Jens and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1868-4238},
  isbn      = {978-0-7923-7446-6},
  doi       = {10.1007/978-0-306-47002-8_5},
  localid   = {Local-ID: C125675300671F7B-D1670F8BF88F5511C1256A2400516264-Vorsatz_deform2000},
  publisher = {Springer},
  year      = {2000},
  date      = {2001},
  booktitle = {Deformable Avatars},
  editor    = {Magnenat-Thalmann, Nadia and Thalmann, Daniel},
  pages     = {46--58},
  series    = {IFIP Advances in Information and Communication Technology},
  volume    = {68},
  address   = {Geneva, Switzerland},
}
Endnote
%0 Conference Proceedings %A Vorsatz, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multiresolution Modeling and Interactive Deformation of Large 3D Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32AE-D %F EDOC: 520193 %F OTHER: Local-ID: C125675300671F7B-D1670F8BF88F5511C1256A2400516264-Vorsatz_deform2000 %R 10.1007/978-0-306-47002-8_5 %D 2001 %B IFIP TC5/WG5.10 DEFORM'2000 Workshop and AVATARS'2000 Workshop %Z date of event: 2000-11-29 - 2000-11-30 %C Geneva,Switzerland %B Deformable Avatars %E Magnenat-Thalmann, Nadia; Thalmann, Daniel %P 46 - 58 %I Springer %@ 978-0-7923-7446-6 %B IFIP Advances in Information and Communication Technology %N 68 %@ false %U https://rdcu.be/dyIqB
Vorsatz, J., Rössl, C., Kobbelt, L., and Seidel, H.-P. 2001. Feature Sensitive Remeshing. Computer Graphics Forum, Blackwell.
Abstract
Remeshing artifacts are a fundamental problem when converting a given<br>geometry into a triangle mesh. We propose a new remeshing technique<br>that is sensitive to features. First, the resolution of the mesh is<br>iteratively adapted by a global restructuring process which<br>additionally optimizes the connectivity. Then a particle system<br>approach evenly distributes the vertices across the original<br>geometry. To exactly find the features we extend this relaxation<br>procedure by an effective mechanism to attract the vertices to feature<br>edges. The attracting force is imposed by means of a hierarchical<br>curvature field and does not require any thresholding parameters to<br>classify the features.
Export
BibTeX
@inproceedings{Vorsatz-et-al_Eurograph.01,
  TITLE     = {Feature Sensitive Remeshing},
  AUTHOR    = {Vorsatz, Jens and R{\"o}ssl, Christian and Kobbelt, Leif and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/1467-8659.00532},
  LOCALID   = {Local-ID: C125675300671F7B-CEA379D851BE5487C1256A14004393D6-Vorsatz2001_eg},
  PUBLISHER = {Blackwell},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {Remeshing artifacts are a fundamental problem when converting a given<br>geometry into a triangle mesh. We propose a new remeshing technique<br>that is sensitive to features. First, the resolution of the mesh is<br>iteratively adapted by a global restructuring process which<br>additionally optimizes the connectivity. Then a particle system<br>approach evenly distributes the vertices across the original<br>geometry. To exactly find the features we extend this relaxation<br>procedure by an effective mechanism to attract the vertices to feature<br>edges. The attracting force is imposed by means of a hierarchical<br>curvature field and does not require any thresholding parameters to<br>classify the features.},
  BOOKTITLE = {Proceedings of Eurographics 2001},
  EDITOR    = {Chalmers, Alan and Rhyne, Theresa-Marie},
  PAGES     = {393--401},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {20},
  ISSUE     = {3},
  ADDRESS   = {Manchester, UK},
}
Endnote
%0 Conference Proceedings %A Vorsatz, Jens %A R&#246;ssl, Christian %A Kobbelt, Leif %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Sensitive Remeshing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-328F-6 %F EDOC: 520191 %F OTHER: Local-ID: C125675300671F7B-CEA379D851BE5487C1256A14004393D6-Vorsatz2001_eg %R 10.1111/1467-8659.00532 %D 2001 %B Eurographics 2001 %Z date of event: 2001 - %C Manchester, UK %X Remeshing artifacts are a fundamental problem when converting a given<br>geometry into a triangle mesh. We propose a new remeshing technique<br>that is sensitive to features. First, the resolution of the mesh is<br>iteratively adapted by a global restructuring process which<br>additionally optimizes the connectivity. Then a particle system<br>approach evenly distributes the vertices across the original<br>geometry. To exactly find the features we extend this relaxation<br>procedure by an effective mechanism to attract the vertices to feature<br>edges. The attracting force is imposed by means of a hierarchical<br>curvature field and does not require any thresholding parameters to<br>classify the features. %B Proceedings of Eurographics 2001 %E Chalmers, Alan; Rhyne, Theresa-Marie %P 393 - 401 %I Blackwell %J Computer Graphics Forum %V 20 %N 3 %I Blackwell-Wiley %@ false
Schneider, R., Kobbelt, L., and Seidel, H.-P. 2001. Improved Bi-Laplacian Mesh Fairing. Mathematical Methods for Curves and Surfaces: Oslo 2000, Vanderbilt University.
Export
BibTeX
@inproceedings{Schneider2001b,
  TITLE     = {Improved {Bi-Laplacian} Mesh Fairing},
  AUTHOR    = {Schneider, Robert and Kobbelt, Leif and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-8265-1378-6},
  LOCALID   = {Local-ID: C125675300671F7B-1EF3C49B0EC01DC3C12569DC00444B4C-Schneider2001b},
  PUBLISHER = {Vanderbilt University},
  YEAR      = {2001},
  DATE      = {2001},
  BOOKTITLE = {Mathematical Methods for Curves and Surfaces: Oslo 2000},
  EDITOR    = {Lyche, Tom and Schumaker, Larry L.},
  PAGES     = {445--454},
  SERIES    = {Innovations in Applied Mathematics},
  ADDRESS   = {Oslo, Norway},
}
Endnote
%0 Conference Proceedings %A Schneider, Robert %A Kobbelt, Leif %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Improved Bi-Laplacian Mesh Fairing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32A3-3 %F EDOC: 520160 %F OTHER: Local-ID: C125675300671F7B-1EF3C49B0EC01DC3C12569DC00444B4C-Schneider2001b %I Vanderbilt University %D 2001 %B Untitled Event %Z date of event: 2001 - %C Oslo, Norway %B Mathematical Methods for Curves and Surfaces: Oslo 2000 %E Lyche, Tom; Schumaker, Larry L. %P 445 - 454 %I Vanderbilt University %@ 0-8265-1378-6 %B Innovations in Applied Mathematics
Schirmacher, H., Li, M., and Seidel, H.-P. 2001a. On-the-fly Processing of Generalized Lumigraphs. Computer Graphics Forum, Blackwell.
Abstract
We introduce a flexible and powerful concept for reconstructing arbitrary views <br>from multiple source images on<br>the fly. Our approach is based on a Lumigraph structure with per-pixel depth <br>values, and generalizes the classical<br>two-plane parameterized light fields and Lumigraphs. With our technique, it is <br>possible to render arbitrary views<br>of time-varying, non-diffuse scenes at interactive frame rates, and it allows <br>using any kind of sensor that yields<br>images with dense depth information. We demonstrate the flexibility and <br>efficiency of our approach through various<br>examples.
Export
BibTeX
@inproceedings{Schirmacher-et-al_Eurograph.01,
  TITLE     = {On-the-fly Processing of Generalized {Lumigraphs}},
  AUTHOR    = {Schirmacher, Hartmut and Li, Ming and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/1467-8659.00509},
  LOCALID   = {Local-ID: C125675300671F7B-411631A8CACC9183C1256A7F00281892-Schirmacher:2001:OTF},
  PUBLISHER = {Blackwell},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We introduce a flexible and powerful concept for reconstructing arbitrary views <br>from multiple source images on<br>the fly. Our approach is based on a Lumigraph structure with per-pixel depth <br>values, and generalizes the classical<br>two-plane parameterized light fields and Lumigraphs. With our technique, it is <br>possible to render arbitrary views<br>of time-varying, non-diffuse scenes at interactive frame rates, and it allows <br>using any kind of sensor that yields<br>images with dense depth information. We demonstrate the flexibility and <br>efficiency of our approach through various<br>examples.},
  BOOKTITLE = {Proceedings of Eurographics 2001},
  EDITOR    = {Chalmers, Alan and Rhyne, Theresa-Marie},
  PAGES     = {165--174},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {20},
  ISSUE     = {3},
  ADDRESS   = {Manchester, UK},
}
Endnote
%0 Conference Proceedings %A Schirmacher, Hartmut %A Li, Ming %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On-the-fly Processing of Generalized Lumigraphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32BA-2 %F EDOC: 520210 %F OTHER: Local-ID: C125675300671F7B-411631A8CACC9183C1256A7F00281892-Schirmacher:2001:OTF %R 10.1111/1467-8659.00509 %D 2001 %B Eurographics 2001 %Z date of event: 2001 - %C Manchester, UK %X We introduce a flexible and powerful concept for reconstructing arbitrary views <br>from multiple source images on<br>the fly. Our approach is based on a Lumigraph structure with per-pixel depth <br>values, and generalizes the classical<br>two-plane parameterized light fields and Lumigraphs. With our technique, it is <br>possible to render arbitrary views<br>of time-varying, non-diffuse scenes at interactive frame rates, and it allows <br>using any kind of sensor that yields<br>images with dense depth information. We demonstrate the flexibility and <br>efficiency of our approach through various<br>examples. %B Proceedings of Eurographics 2001 %E Chalmers, Alan; Rhyne, Theresa-Marie %P 165 - 174 %I Blackwell %J Computer Graphics Forum %V 20 %N 3 %I Blackwell-Wiley %@ false
Schirmacher, H., Vogelgsang, C., Seidel, H.-P., and Greiner, G. 2001b. Efficient Free Form Light Field Rendering. Vision, Modeling and Visualization 2001 (VMV 2001), Aka GmbH.
Export
BibTeX
@inproceedings{Schirmacher-et-al_VMV01,
  author    = {Schirmacher, Hartmut and Vogelgsang, Christian and Seidel, Hans-Peter and Greiner, G{\"u}nther},
  title     = {Efficient Free Form Light Field Rendering},
  booktitle = {Vision, Modeling and Visualization 2001 (VMV 2001)},
  editor    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter},
  pages     = {249--256},
  publisher = {Aka GmbH},
  address   = {Stuttgart, Germany},
  year      = {2001},
  date      = {2001},
  language  = {eng},
  isbn      = {3-89838-028-9},
  url       = {http://www.mpi-sb.mpg.de/~htschirm/publ/Schirmacher:2001:EFF.pdf},
  localid   = {Local-ID: C125675300671F7B-8320DE37B415569FC1256A99002E9E69-Schirmacher:2001:EFF},
}
Endnote
%0 Conference Proceedings %A Schirmacher, Hartmut %A Vogelgsang, Christian %A Seidel, Hans-Peter %A Greiner, G&#252;nther %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Efficient Free Form Light Field Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-327F-A %F EDOC: 520224 %U http://www.mpi-sb.mpg.de/~htschirm/publ/Schirmacher:2001:EFF.pdf %F OTHER: Local-ID: C125675300671F7B-8320DE37B415569FC1256A99002E9E69-Schirmacher:2001:EFF %D 2001 %B 6th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2001-11-21 - 2001-11-23 %C Stuttgart, Germany %B Vision, Modeling and Visualization 2001 %E Ertl, Thomas; Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter %P 249 - 256 %I Aka GmbH %@ 3-89838-028-9
Scheel, A., Stamminger, M., and Seidel, H.-P. 2001a. Thrifty Final Gather for Radiosity. Rendering Techniques 2001 (EGSR 2001), Springer.
Export
BibTeX
@inproceedings{Scheel-et-al_EGSR01,
  author    = {Scheel, Annette and Stamminger, Marc and Seidel, Hans-Peter},
  title     = {Thrifty Final Gather for Radiosity},
  booktitle = {Rendering Techniques 2001 (EGSR 2001)},
  editor    = {Gortler, Steven and Myszkowski, Karol},
  pages     = {1--12},
  series    = {Eurographics},
  publisher = {Springer},
  address   = {London, UK},
  year      = {2001},
  date      = {2001},
  language  = {eng},
  issn      = {0946-2767},
  isbn      = {978-3-211-83709-2},
  doi       = {10.1007/978-3-7091-6242-2_1},
  localid   = {Local-ID: C125675300671F7B-B8DB9D578EB3CD31C1256A7D0059548F-Scheel2001},
}
Endnote
%0 Conference Proceedings %A Scheel, Annette %A Stamminger, Marc %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Thrifty Final Gather for Radiosity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32D3-8 %F EDOC: 520208 %F OTHER: Local-ID: C125675300671F7B-B8DB9D578EB3CD31C1256A7D0059548F-Scheel2001 %R 10.1007/978-3-7091-6242-2_1 %D 2001 %B 12th Eurographics Workshop on Rendering Techniques %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %B Rendering Techniques 2001 %E Gortler, Steven; Myszkowski, Karol %P 1 - 12 %I Springer %@ 978-3-211-83709-2 %B Eurographics %@ false
Scheel, A., Stamminger, M., Pütz, J., and Seidel, H.-P. 2001b. Enhancements to Directional Coherence Maps. Proceedings of the 9th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2001), University of West Bohemia.
Export
BibTeX
@inproceedings{Scheel-et-al_WSCG01,
  author    = {Scheel, Annette and Stamminger, Marc and P{\"u}tz, J{\"o}rg and Seidel, Hans-Peter},
  title     = {Enhancements to Directional Coherence Maps},
  booktitle = {Proceedings of the 9th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2001)},
  editor    = {Magnenat-Thalmann, Nadia and Skala, Vaclav},
  pages     = {403--410},
  publisher = {University of West Bohemia},
  address   = {Plzen, Czech Republic},
  year      = {2001},
  date      = {2001},
  language  = {eng},
  url       = {http://wscg.zcu.cz/wscg2001/Papers_2001/R345.pdf.gz},
  localid   = {Local-ID: C125675300671F7B-146A0AE3E38D9CE1C1256A7D005A0625-Scheel2001b},
}
Endnote
%0 Conference Proceedings %A Scheel, Annette %A Stamminger, Marc %A P&#252;tz, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Enhancements to Directional Coherence Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3284-B %F EDOC: 520209 %U http://wscg.zcu.cz/wscg2001/Papers_2001/R345.pdf.gz %F OTHER: Local-ID: C125675300671F7B-146A0AE3E38D9CE1C1256A7D005A0625-Scheel2001b %D 2001 %B 9th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision %Z date of event: 2001-02-05 - 2001-02-09 %C Plzen, Czech Republic %B Proceedings of the 9th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision %E Magnenat-Thalmann, Nadia; Skala, Vaclav %P 403 - 410 %I University of West Bohemia
Rössl, C., Kobbelt, L., and Seidel, H.-P. 2001. Recovering Structural Information from Triangulated Surfaces. Mathematical Methods for Curves and Surfaces: Oslo 2000, Vanderbilt University.
Abstract
We present a technique for recovering structural information from triangular meshes that can then be used for segmentation, e.g. in reverse engineering applications. In a preprocessing step, we detect feature regions on the surface by classifying the vertices according to some discrete curvature measure. Then we apply a skeletonization algorithm for extracting feature lines from these regions. To achieve this, we generalize the concept of morphological operators to unorganized triangle meshes, providing techniques for noise reduction on the binary feature classification and for skeletonization. The necessary operations are easy to implement, robust, and can be executed efficiently on a mesh data structure.
Export
BibTeX
@inproceedings{Roessl2000_RSITS,
  TITLE     = {Recovering Structural Information from Triangulated Surfaces},
  AUTHOR    = {R{\"o}ssl, Christian and Kobbelt, Leif and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-8265-1378-6},
  LOCALID   = {Local-ID: C125675300671F7B-FC412548BD01A02BC1256A9600458C57-Roessl2000_RSITS},
  PUBLISHER = {Vanderbilt University},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We present a technique for recovering structural information from triangular meshes that can then be used for segmentation, e.g. in reverse engineering applications. In a preprocessing step, we detect feature regions on the surface by classifying the vertices according to some discrete curvature measure. Then we apply a skeletonization algorithm for extracting feature lines from these regions. To achieve this, we generalize the concept of morphological operators to unorganized triangle meshes, providing techniques for noise reduction on the binary feature classification and for skeletonization. The necessary operations are easy to implement, robust, and can be executed efficiently on a mesh data structure.},
  BOOKTITLE = {Mathematical Methods for Curves and Surfaces: Oslo 2000},
  EDITOR    = {Lyche, Tom and Schumaker, Larry L.},
  PAGES     = {423--432},
  SERIES    = {Innovations in Applied Mathematics},
  ADDRESS   = {Oslo, Norway},
}
Endnote
%0 Conference Proceedings %A R&#246;ssl, Christian %A Kobbelt, Leif %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Recovering Structural Information from Triangulated Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32C5-8 %F EDOC: 520221 %F OTHER: Local-ID: C125675300671F7B-FC412548BD01A02BC1256A9600458C57-Roessl2000_RSITS %I Vanderbilt University %D 2001 %B Untitled Event %Z date of event: 2001 - %C Oslo, Norway %X We present a technique for recovering structural information from triangular meshes that can then be used for segmentation, e.g. in reverse engineering applications. In a preprocessing step, we detect feature regions on the surface by classifying the vertices according to some discrete curvature measure. Then we apply a skeletonization algorithm for extracting feature lines from these regions. To achieve this, we generalize the concept of morphological operators to unorganized triangle meshes, providing techniques for noise reduction on the binary feature classification and for skeletonization. The necessary operations are easy to implement, robust, and can be executed efficiently on a mesh data structure. %B Mathematical Methods for Curves and Surfaces: Oslo 2000 %E Lyche, Tom; Schumaker, Larry L. %P 423 - 432 %I Vanderbilt University %@ 0-8265-1378-6
Myszkowski, K., Tawara, T., Akamine, H., and Seidel, H.-P. 2001. Perception-Guided Global Illumination Solution for Animation Rendering. Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, ACM.
Export
BibTeX
@inproceedings{Myszkowski-et-al_SIGGRAPH01,
  author    = {Myszkowski, Karol and Tawara, Takehiro and Akamine, Hiroyuki and Seidel, Hans-Peter},
  title     = {Perception-Guided Global Illumination Solution for Animation Rendering},
  booktitle = {Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques},
  editor    = {Fiume, Eugene},
  pages     = {221--230},
  publisher = {ACM},
  address   = {Los Angeles, CA, USA},
  year      = {2001},
  date      = {2001},
  language  = {eng},
  isbn      = {978-1-58113-374-5},
  doi       = {10.1145/383259.383284},
  url       = {http://www.mpi-sb.mpg.de/resources/aqm/dynenv/paper/sg2001-myszkowski.pdf},
  localid   = {Local-ID: C125675300671F7B-1D7B8F7EA05FC2F8C1256A7D004EABD7-Myszkowski2001b},
}
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tawara, Takehiro %A Akamine, Hiroyuki %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Guided Global Illumination Solution for Animation Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32BD-B %F EDOC: 520203 %U http://www.mpi-sb.mpg.de/resources/aqm/dynenv/paper/sg2001-myszkowski.pdf %F OTHER: Local-ID: C125675300671F7B-1D7B8F7EA05FC2F8C1256A7D004EABD7-Myszkowski2001b %R 10.1145/383259.383284 %D 2001 %B 28th Annual Conference on Computer Graphics and Interactive Techniques %Z date of event: 2001-08-12 - 2001-08-17 %C Los Angeles, CA, USA %B Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques %E Fiume, Eugene %P 221 - 230 %I ACM %@ 978-1-58113-374-5
Lensch, H.P.A., Goesele, M., and Seidel, H.-P. 2001a. A Framework for the Acquisition, Processing and Interactive Display of High Quality 3D Models. Tutorial Notes of the DAGM 2001, Max-Planck-Institut für Informatik.
Abstract
This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or object recognition, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the appearance of the material or materials it is made of, e.g. the object's color, texture, or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models.
Export
BibTeX
@inproceedings{Lensch:2001:DAGM,
  TITLE     = {A Framework for the Acquisition, Processing and Interactive Display of High Quality {3D} Models},
  AUTHOR    = {Lensch, Hendrik P. A. and Goesele, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  URL       = {http://data.mpi-sb.mpg.de/internet/reports.nsf/NumberView/2001-4-005},
  LOCALID   = {Local-ID: C125675300671F7B-9E55AC65D9AA9B3FC1256AB900372B0A-Lensch:2001:DAGM},
  PUBLISHER = {Max-Planck-Institut f{\"u}r Informatik},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or object recognition, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the appearance of the material or materials it is made of, e.g. the object's color, texture, or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models.},
  BOOKTITLE = {Tutorial Notes of the DAGM 2001},
  PAGES     = {1--39},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Framework for the Acquisition, Processing and Interactive Display of High Quality 3D Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-325F-1 %F EDOC: 520232 %U http://data.mpi-sb.mpg.de/internet/reports.nsf/NumberView/2001-4-005 %F OTHER: Local-ID: C125675300671F7B-9E55AC65D9AA9B3FC1256AB900372B0A-Lensch:2001:DAGM %I Max-Planck-Institut f&#252;r Informatik %D 2001 %B Untitled Event %Z date of event: 2001 - %C Munich, Germany %X This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or object recognition, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the appearance of the material or materials it is made of, e.g. the object's color, texture, or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models. %B Tutorial Notes of the DAGM 2001 %P 1 - 39 %I Max-Planck-Institut f&#252;r Informatik
Lensch, H.P.A., Kautz, J., Gösele, M., Heidrich, W., and Seidel, H.-P. 2001b. Image-based reconstruction of spatially varying materials. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.
Export
BibTeX
@techreport{LenschKautzGoeseleHeidrichSeidel2001,
  TITLE       = {Image-based reconstruction of spatially varying materials},
  AUTHOR      = {Lensch, Hendrik P. A. and Kautz, Jan and G{\"o}sele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-001},
  NUMBER      = {MPI-I-2001-4-001},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2001},
  DATE        = {2001},
  ABSTRACT    = {The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Lensch, Hendrik P. A. %A Kautz, Jan %A G&#246;sele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image-based reconstruction of spatially varying materials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6CAD-3 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-001 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2001 %P 20 p. %X The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Lensch, H.P.A., Kautz, J., Goesele, M., and Seidel, H.-P. 2001c. A Framework for the Acquisition, Processing, Transmission, and Interactive Display of High Quality 3D Models on the Web. Tutorial Notes of the Web3D Conference 2001, Web3D Consortium.
Abstract
Digital documents often require highly detailed representations of real world objects. This is especially true for advanced e-commerce applications and other multimedia data bases like online encyclopaedias or virtual museums. Their further success will strongly depend on advances in the field of high quality object representation, distribution and rendering. This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models and shows how these results can be seamlessly integrated with previous work into a single framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web.
Export
BibTeX
@inproceedings{Lensch:2001:Web3D,
  TITLE     = {A Framework for the Acquisition, Processing, Transmission, and Interactive Display of High Quality {3D} Models on the Web},
  AUTHOR    = {Lensch, Hendrik P. A. and Kautz, Jan and Goesele, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  URL       = {http://www.c-lab.de/web3d2001/Workshops/tutorial_HighQuality3DModels_web3d2001.pdf},
  LOCALID   = {Local-ID: C125675300671F7B-1B10DACD4619C8BCC1256AB90036C38E-Lensch:2001:Web3D},
  PUBLISHER = {Web3D Consortium},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {Digital documents often require highly detailed representations of real world objects. This is especially true for advanced e-commerce applications and other multimedia data bases like online encyclopaedias or virtual museums. Their further success will strongly depend on advances in the field of high quality object representation, distribution and rendering. This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models and shows how these results can be seamlessly integrated with previous work into a single framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web.},
  BOOKTITLE = {Tutorial Notes of the Web3D Conference 2001},
  PAGES     = {1--13},
  ADDRESS   = {Paderborn, Germany},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Kautz, Jan %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Framework for the Acquisition, Processing, Transmission, and Interactive Display of High Quality 3D Models on the Web : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3263-6 %F EDOC: 520231 %U http://www.c-lab.de/web3d2001/Workshops/tutorial_HighQuality3DModels_web3d2001.pdf %F OTHER: Local-ID: C125675300671F7B-1B10DACD4619C8BCC1256AB90036C38E-Lensch:2001:Web3D %I Web3D Consortium %D 2001 %B Untitled Event %Z date of event: 2001 - %C Paderborn, Germany %X Digital documents often require highly detailed representations of real world objects. This is especially true for advanced e-commerce applications and other multimedia data bases like online encyclopaedias or virtual museums. Their further success will strongly depend on advances in the field of high quality object representation, distribution and rendering. This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models and shows how these results can be seamlessly integrated with previous work into a single framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web. %B Tutorial Notes of the Web3D Conference 2001 %P 1 - 13 %I Web3D Consortium
Lensch, H.P.A., Kautz, J., Goesele, M., Heidrich, W., and Seidel, H.-P. 2001d. Image-Based Reconstruction of Spatially Varying Materials. Rendering Techniques 2001 (EGSR 2001), Springer.
Abstract
The measurement of accurate material properties is an important step<br>towards photorealistic rendering. Many real-world objects are composed<br>of a number of materials that often show subtle changes even within a<br>single material. Thus, for photorealistic rendering both the general<br>surface properties as well as the spatially varying effects of the<br>object are needed.<br><br>We present an image-based measuring method that robustly detects the<br>different materials of real objects and fits an average bidirectional<br>reflectance distribution function (BRDF) to each of them. In order to<br>model the local changes as well, we project the measured data for each<br>surface point into a basis formed by the recovered BRDFs leading to a<br>truly spatially varying BRDF representation.<br><br>A high quality model of a real object can be generated with relatively<br>few input data. The generated model allows for rendering under<br>arbitrary viewing and lighting conditions and realistically reproduces<br>the appearance of the original object.
Export
BibTeX
@inproceedings{Lensch-et-al_EGSR01,
  TITLE     = {Image-Based Reconstruction of Spatially Varying Materials},
  AUTHOR    = {Lensch, Hendrik P. A. and Kautz, Jan and Goesele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0946-2767},
  ISBN      = {978-3-211-83709-2},
  DOI       = {10.1007/978-3-7091-6242-2_10},
  LOCALID   = {Local-ID: C125675300671F7B-249EA7C6EDD9BBF4C1256A7D0052B695-Lensch:2001:IRS},
  PUBLISHER = {Springer},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.},
  BOOKTITLE = {Rendering Techniques 2001 (EGSR 2001)},
  EDITOR    = {Gortler, Steven and Myszkowski, Karol},
  PAGES     = {103--114},
  SERIES    = {Eurographics},
  ADDRESS   = {London, UK},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Kautz, Jan %A Goesele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image-Based Reconstruction of Spatially Varying Materials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32A1-7 %F EDOC: 520207 %F OTHER: Local-ID: C125675300671F7B-249EA7C6EDD9BBF4C1256A7D0052B695-Lensch:2001:IRS %R 10.1007/978-3-7091-6242-2_10 %D 2001 %B 12th Eurographics Workshop on Rendering Techniques %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %X The measurement of accurate material properties is an important step<br>towards photorealistic rendering. Many real-world objects are composed<br>of a number of materials that often show subtle changes even within a<br>single material. Thus, for photorealistic rendering both the general<br>surface properties as well as the spatially varying effects of the<br>object are needed.<br><br>We present an image-based measuring method that robustly detects the<br>different materials of real objects and fits an average bidirectional<br>reflectance distribution function (BRDF) to each of them. In order to<br>model the local changes as well, we project the measured data for each<br>surface point into a basis formed by the recovered BRDFs leading to a<br>truly spatially varying BRDF representation.<br><br>A high quality model of a real object can be generated with relatively<br>few input data. The generated model allows for rendering under<br>arbitrary viewing and lighting conditions and realistically reproduces<br>the appearance of the original object. 
%B Rendering Techniques 2001 %E Gortler, Steven; Myszkowski, Karol %P 103 - 114 %I Springer %@ 978-3-211-83709-2 %B Eurographics %@ false
Lensch, H.P.A., Kautz, J., Gösele, M., and Seidel, H.-P. 2001e. A framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Digital documents often require highly detailed representations of real world objects. This is especially true for advanced e-commerce applications and other multimedia data bases like online encyclopaedias or virtual museums. Their further success will strongly depend on advances in the field of high quality object representation, distribution and rendering. This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models and shows how these results can be seamlessly integrated with previous work into a single framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web.
Export
BibTeX
@techreport{LenschKautzGoeseleSeidel2001,
  TITLE       = {A framework for the acquisition, processing, transmission, and interactive display of high quality {3D} models on the Web},
  AUTHOR      = {Lensch, Hendrik P. A. and Kautz, Jan and G{\"o}sele, Michael and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-002},
  NUMBER      = {MPI-I-2001-4-002},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2001},
  DATE        = {2001},
  ABSTRACT    = {Digital documents often require highly detailed representations of real world objects. This is especially true for advanced e-commerce applications and other multimedia data bases like online encyclopaedias or virtual museums. Their further success will strongly depend on advances in the field of high quality object representation, distribution and rendering. This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models and shows how these results can be seamlessly integrated with previous work into a single framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Lensch, Hendrik P. A. %A Kautz, Jan %A G&#246;sele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6CAA-9 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-002 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2001 %P 20 p. %X Digital documents often require highly detailed representations of real world objects. This is especially true for advanced e-commerce applications and other multimedia data bases like online encyclopaedias or virtual museums. Their further success will strongly depend on advances in the field of high quality object representation, distribution and rendering. This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models and shows how these results can be seamlessly integrated with previous work into a single framework for the acquisition, processing, transmission, and interactive display of high quality 3D models on the Web. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Lensch, H.P.A., Heidrich, W., and Seidel, H.-P. 2001f. A Silhouette-Based Algorithm for Texture Registration and Stitching. Graphical Models 63, 4.
Abstract
In this paper a system is presented that automatically registers and stitches textures acquired from multiple photographic images onto the surface of a given corresponding 3D model. Within this process the camera position, direction and field of view must be determined for each of the images. For this registration, which aligns a 2D image to a 3D model we present an efficient hardware-accelerated silhouette-based algorithm working on different image resolutions that accurately registers each image without any user interaction. Besides the silhouettes, also the given texture information can be used to improve accuracy by comparing one stitched texture to already registered images resulting in a global multi-view optimization. After the 3D-2D registration for each part of the 3D model's surface the view is determined which provides the best available texture. Textures are blended at the borders of regions assigned to different views.
Export
BibTeX
@article{Lensch-et-al_Graph.Mod.01,
  TITLE     = {A Silhouette-Based Algorithm for Texture Registration and Stitching},
  AUTHOR    = {Lensch, Hendrik P. A. and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1524-0703},
  URL       = {http://www.idealibrary.com/links/doi/10.1006/gmod.2001.0554},
  DOI       = {10.1006/gmod.2001.0554},
  LOCALID   = {Local-ID: C125675300671F7B-2A48C98D369E5FA4C1256B9700487D72-Lensch2001:SBA},
  PUBLISHER = {Academic Press},
  ADDRESS   = {San Diego, Calif.},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {In this paper a system is presented that automatically registers and stitches textures acquired from multiple photographic images onto the surface of a given corresponding 3D model. Within this process the camera position, direction and field of view must be determined for each of the images. For this registration, which aligns a 2D image to a 3D model we present an efficient hardware-accelerated silhouette-based algorithm working on different image resolutions that accurately registers each image without any user interaction. Besides the silhouettes, also the given texture information can be used to improve accuracy by comparing one stitched texture to already registered images resulting in a global multi-view optimization. After the 3D-2D registration for each part of the 3D model's surface the view is determined which provides the best available texture. Textures are blended at the borders of regions assigned to different views.},
  JOURNAL   = {Graphical Models},
  VOLUME    = {63},
  NUMBER    = {4},
  PAGES     = {245--262},
}
Endnote
%0 Journal Article %A Lensch, Hendrik P. A. %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Silhouette-Based Algorithm for Texture Registration and Stitching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-326A-7 %F EDOC: 520247 %U http://www.idealibrary.com/links/doi/10.1006/gmod.2001.0554 %F OTHER: Local-ID: C125675300671F7B-2A48C98D369E5FA4C1256B9700487D72-Lensch2001:SBA %R 10.1006/gmod.2001.0554 %D 2001 %* Review method: peer-reviewed %X In this paper a system is presented that automatically registers<br> and stitches textures acquired from multiple photographic images<br> onto the surface of a given corresponding 3D model. Within this<br> process the camera position, direction and field of view must be<br> determined for each of the images. For this registration, which<br> aligns a 2D image to a 3D model we present an efficient<br> hardware-accelerated silhouette-based algorithm working on different<br> image resolutions that accurately registers each image without any<br> user interaction. Besides the silhouettes, also the given texture<br> information can be used to improve accuracy by comparing one<br> stitched texture to already registered images resulting in a global<br> multi-view optimization. After the 3D-2D registration for each part<br> of the 3D model's surface the view is determined which provides the<br> best available texture. Textures are blended at the borders of<br> regions assigned to different views. %J Graphical Models %V 63 %N 4 %& 245 %P 245 - 262 %I Academic Press %C San Diego, Calif. %@ false
Lensch, H.P.A., Kautz, J., Goesele, M., and Seidel, H.-P. 2001g. 3D Model Acquisition Including Reflection Properties. Proceedings of the ECDL Workshop Generalized Documents, Selbstverlag.
Abstract
The measurement of accurate material properties is an important step towards the inclusion of realistic objects in digital documents. Many real-world objects are composed of a number of materials with subtle changes even within a single material. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A compact, high quality model of a real object can be generated with relatively few input data.
Export
BibTeX
@inproceedings{Lensch:2001:3MA,
  TITLE     = {{3D} Model Acquisition Including Reflection Properties},
  AUTHOR    = {Lensch, Hendrik P. A. and Kautz, Jan and Goesele, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  LOCALID   = {Local-ID: C125675300671F7B-4054AA66E1012AB6C1256AB1002986CF-Lensch:2001:3MA},
  PUBLISHER = {Selbstverlag},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {The measurement of accurate material properties is an important step towards the inclusion of realistic objects in digital documents. Many real-world objects are composed of a number of materials with subtle changes even within a single material. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A compact, high quality model of a real object can be generated with relatively few input data.},
  BOOKTITLE = {Proceedings of the ECDL Workshop Generalized Documents},
  EDITOR    = {Fellner, Dieter W. and Fuhr, Norbert and Witten, Ian},
  PAGES     = {1--6},
}
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Kautz, Jan %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Model Acquisition Including Reflection Properties : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3259-D %F EDOC: 520228 %F OTHER: Local-ID: C125675300671F7B-4054AA66E1012AB6C1256AB1002986CF-Lensch:2001:3MA %I Selbstverlag %D 2001 %B Untitled Event %Z date of event: 2001 - %C Darmstadt, Germany %X The measurement of accurate material properties is an important step towards the inclusion of realistic objects in digital documents. Many real-world objects are composed of a number of materials with subtle changes even within a single material. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A compact, high quality model of a real object can be generated with relatively few input data. %B Proceedings of the ECDL Workshop Generalized Documents %E Fellner, Dieter W.; Fuhr, Norbert; Witten, Ian %P 1 - 6 %I Selbstverlag
Lensch, H.P.A., Gösele, M., and Seidel, H.-P. 2001h. A framework for the acquisition, processing and interactive display of high quality 3D models. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or object recognition, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the appearance of the material or materials it is made of, e.g. the object's color, texture, or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models.
Export
BibTeX
@techreport{LenschGoeseleSeidel2001,
  TITLE       = {A framework for the acquisition, processing and interactive display of high quality {3D} models},
  AUTHOR      = {Lensch, Hendrik P. A. and G{\"o}sele, Michael and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-005},
  NUMBER      = {MPI-I-2001-4-005},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2001},
  DATE        = {2001},
  ABSTRACT    = {This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or object recognition, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the appearance of the material or materials it is made of, e.g. the object's color, texture, or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Lensch, Hendrik P. A. %A G&#246;sele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A framework for the acquisition, processing and interactive display of high quality 3D models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6CA1-C %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-005 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2001 %P 39 p. %X This tutorial highlights some recent results on the acquisition and interactive display of high quality 3D models. For further use in photorealistic rendering or object recognition, a high quality representation must capture two different things: the shape of the model represented as a geometric description of its surface and on the other hand the appearance of the material or materials it is made of, e.g. the object's color, texture, or reflection properties. The tutorial shows how computer vision and computer graphics techniques can be seamlessly integrated into a single framework for the acquisition, processing, and interactive display of high quality 3D models. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Lensch, H.P.A., Goesele, M., and Seidel, H.-P. 2001i. A Framework for the Acquisition, Processing and Interactive Display of High Quality 3D Models. Festschrift zum 60. Geburtstag von Wolfgang Straßer WSI-2001-20.
Export
BibTeX
@article{Lensch:FAP:2001,
  TITLE    = {A Framework for the Acquisition, Processing and Interactive Display of High Quality {3D} Models},
  AUTHOR   = {Lensch, Hendrik P. A. and Goesele, Michael and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN     = {0946-3852},
  LOCALID  = {Local-ID: C125675300671F7B-695DD9E920317FC6C1256B3600286A59-Lensch:FAP:2001},
  YEAR     = {2001},
  DATE     = {2001},
  JOURNAL  = {Festschrift zum 60. Geburtstag von Wolfgang Stra{\ss}er},
  VOLUME   = {WSI-2001-20},
  PAGES    = {131--166},
}
Endnote
%0 Journal Article %A Lensch, Hendrik P. A. %A Goesele, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Framework for the Acquisition, Processing and Interactive Display of High Quality 3D Models : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3261-A %F EDOC: 520241 %F OTHER: Local-ID: C125675300671F7B-695DD9E920317FC6C1256B3600286A59-Lensch:FAP:2001 %D 2001 %* Review method: peer-reviewed %J Festschrift zum 60. Geburtstag von Wolfgang Stra&#223;er %V WSI-2001-20 %& 131 %P 131 - 166 %@ false
Kobbelt, L., Botsch, M., Schwanecke, U., and Seidel, H.-P. 2001. Feature Sensitive Surface Extraction from Volume Data. Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH 2001), ACM.
Export
BibTeX
@inproceedings{Kobbelt-et-al_SIGGRAPH01,
  TITLE     = {Feature Sensitive Surface Extraction from Volume Data},
  AUTHOR    = {Kobbelt, Leif and Botsch, Mario and Schwanecke, Ulrich and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-374-5},
  DOI       = {10.1145/383259.383265},
  LOCALID   = {Local-ID: C125675300671F7B-A6F57534A7F7730AC1256A9600468CCA-Kobbelt2001:FSSEVD},
  PUBLISHER = {ACM},
  YEAR      = {2001},
  DATE      = {2001},
  BOOKTITLE = {Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH 2001)},
  EDITOR    = {Fiume, Eugene},
  PAGES     = {57--66},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Kobbelt, Leif %A Botsch, Mario %A Schwanecke, Ulrich %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Sensitive Surface Extraction from Volume Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3291-B %F EDOC: 520223 %F OTHER: Local-ID: C125675300671F7B-A6F57534A7F7730AC1256A9600468CCA-Kobbelt2001:FSSEVD %R 10.1145/383259.383265 %D 2001 %B 28th Annual Conference on Computer Graphics and Interactive Techniques %Z date of event: 2001-08-12 - 2001-08-17 %C Los Angeles, CA, USA %B Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques %E Fiume, Eugene %P 57 - 66 %I ACM %@ 978-1-58113-374-5
Kautz, J. and Seidel, H.-P. 2001. Hardware Accelerated Displacement Mapping for Image Based Rendering. Proceedings Graphics Interface 2001 (GI 2001), Morgan Kaufmann.
Export
BibTeX
@inproceedings{Kautz-Seidel_GI01,
  TITLE     = {Hardware Accelerated Displacement Mapping for Image Based Rendering},
  AUTHOR    = {Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-9688808-0-0},
  DOI       = {10.20380/GI2001.08},
  LOCALID   = {Local-ID: C125675300671F7B-92AB0101FA00C15FC1256A800039E331-Kautz:2001:HAD},
  PUBLISHER = {Morgan Kaufmann},
  YEAR      = {2001},
  DATE      = {2001},
  BOOKTITLE = {Proceedings Graphics Interface 2001 (GI 2001)},
  EDITOR    = {Watson, Benjamin and Buchanan, John W.},
  PAGES     = {61--70},
  ADDRESS   = {Ottawa, Canada},
}
Endnote
%0 Conference Proceedings %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hardware Accelerated Displacement Mapping for Image Based Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3299-C %F EDOC: 520212 %F OTHER: Local-ID: C125675300671F7B-92AB0101FA00C15FC1256A800039E331-Kautz:2001:HAD %R 10.20380/GI2001.08 %D 2001 %B Graphics Interface Conference 2001 %Z date of event: 2001-06-07 - 2001-06-09 %C Ottawa, Canada %B Proceedings Graphics Interface 2001 %E Watson, Benjamin; Buchanan, John W. %P 61 - 70 %I Morgan Kaufmann %@ 0-9688808-0-0
Kautz, J., Heidrich, W., and Seidel, H.-P. 2001. Real-Time Bump Map Synthesis. Proceedings of the ACM Eurographics/SIGGRAPH Workshop on Graphics Hardware 2001, ACM.
Export
BibTeX
@inproceedings{Kautz-et-al_Eurographics/SIGGRAPH01,
  TITLE     = {Real-Time Bump Map Synthesis},
  AUTHOR    = {Kautz, Jan and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-58113-407-0},
  DOI       = {10.1145/383507.383534},
  LOCALID   = {Local-ID: C125675300671F7B-D31456275CA3471CC1256A800042E1DB-Kautz:2001:RTB},
  PUBLISHER = {ACM},
  YEAR      = {2001},
  DATE      = {2001},
  BOOKTITLE = {Proceedings of the ACM Eurographics/SIGGRAPH Workshop on Graphics Hardware 2001},
  EDITOR    = {Pfister, Hans-Peter},
  PAGES     = {109--114},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Kautz, Jan %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Bump Map Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32C2-E %F EDOC: 520214 %F OTHER: Local-ID: C125675300671F7B-D31456275CA3471CC1256A800042E1DB-Kautz:2001:RTB %R 10.1145/383507.383534 %D 2001 %B The ACM Eurographics/SIGGRAPH Workshop on Graphics Hardware 2001 %Z date of event: 2001-08-12 - 2001-08-17 %C Los Angeles, CA, USA %B Proceedings of the ACM Eurographics/SIGGRAPH Workshop on Graphics Hardware 2001 %E Pfister, Hans-Peter %P 109 - 114 %I ACM %@ 978-1-58113-407-0
Kähler, K., Haber, J., and Seidel, H.-P. 2001a. Dynamic Refinement of Deformable Triangle Meshes for Rendering. Proceedings Computer Graphics International 2001, IEEE.
Abstract
We present a method to adaptively refine an irregular triangle mesh as it deforms in real-time. The method increases surface smoothness in regions of high deformation by splitting triangles in a fashion similar to one or two steps of Loop subdivision. The refinement is computed for an arbitrary triangle mesh and the subdivided triangles are simply passed to the rendering engine, leaving the mesh itself unchanged. The algorithm can thus be easily plugged into existing systems to enhance visual appearance of animated meshes. The refinement step has very low computational overhead and is easy to implement. We demonstrate the use of the algorithm in our physics-based facial animation system.
Export
BibTeX
@inproceedings{Kahler-et-al_CGI01,
  TITLE     = {Dynamic Refinement of Deformable Triangle Meshes for Rendering},
  AUTHOR    = {K{\"a}hler, Kolja and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1007-8},
  URL       = {http://www.mpi-sb.mpg.de/resources/FAM/publ/cgi2001.pdf},
  DOI       = {10.1109/CGI.2001.934685},
  LOCALID   = {Local-ID: C125675300671F7B-CBF911A5EF6D5F6CC1256A71003B9B2D-Kaehler:DRDTMR},
  PUBLISHER = {IEEE},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We present a method to adaptively refine an irregular triangle mesh as it deforms in real-time. The method increases surface smoothness in regions of high deformation by splitting triangles in a fashion similar to one or two steps of Loop subdivision. The refinement is computed for an arbitrary triangle mesh and the subdivided triangles are simply passed to the rendering engine, leaving the mesh itself unchanged. The algorithm can thus be easily plugged into existing systems to enhance visual appearance of animated meshes. The refinement step has very low computational overhead and is easy to implement. We demonstrate the use of the algorithm in our physics-based facial animation system.},
  BOOKTITLE = {Proceedings Computer Graphics International 2001},
  EDITOR    = {Ip, Horace Ho-Shing and Magnenat-Thalmann, Nadia and Lau, Rynson W. H. and Chua, Tat-Seng},
  PAGES     = {285--290},
  ADDRESS   = {Hong Kong, China},
}
Endnote
%0 Conference Proceedings %A K&#228;hler, Kolja %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Refinement of Deformable Triangle Meshes for Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-327A-3 %F EDOC: 520197 %U http://www.mpi-sb.mpg.de/resources/FAM/publ/cgi2001.pdf %F OTHER: Local-ID: C125675300671F7B-CBF911A5EF6D5F6CC1256A71003B9B2D-Kaehler:DRDTMR %R 10.1109/CGI.2001.934685 %D 2001 %B Computer Graphics International Conference 2001 %Z date of event: 2001-07-06 - 2001-07-06 %C Hong Kong, China %X We present a method to adaptively refine an irregular triangle mesh<br> as it deforms in real-time. The method increases surface smoothness<br> in regions of high deformation by splitting triangles in a fashion<br> similar to one or two steps of Loop subdivision. The refinement is<br> computed for an arbitrary triangle mesh and the subdivided triangles<br> are simply passed to the rendering engine, leaving the mesh itself<br> unchanged. The algorithm can thus be easily plugged into existing<br> systems to enhance visual appearance of animated meshes. The<br> refinement step has very low computational overhead and is easy to<br> implement. We demonstrate the use of the algorithm in our<br> physics-based facial animation system. %B Proceedings Computer Graphics International 2001 %E Ip, Horace Ho-Shing; Magnenat-Thalmann, Nadia; Lau, Rynson W. H.; Chua, Tat-Seng %P 285 - 290 %I IEEE %@ 0-7695-1007-8
Kähler, K., Haber, J., and Seidel, H.-P. 2001b. Geometry-based Muscle Modeling for Facial Animation. Proceedings Graphics Interface 2001 (GI 2001), Morgan Kaufmann.
Abstract
We present a muscle model and methods for muscle construction that allow to easily create animatable facial models from given face geometry. Using our editing tool, one can interactively specify coarse outlines of the muscles, which are then automatically created to fit the face geometry. Our muscle model incorporates different types of muscles and the effects of bulging and intertwining muscle fibers. The influence of muscle contraction onto the skin is simulated using a mass-spring system that connects the skull, muscle, and skin layers of our model.
Export
BibTeX
@inproceedings{Kahler-et-al_GI01,
  TITLE     = {Geometry-based Muscle Modeling for Facial Animation},
  AUTHOR    = {K{\"a}hler, Kolja and Haber, J{\"o}rg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-9688808-0-0},
  URL       = {http://www.mpi-sb.mpg.de/resources/FAM/publ/gi2001.pdf},
  DOI       = {10.20380/GI2001.05},
  LOCALID   = {Local-ID: C125675300671F7B-735EF964CEA6408FC1256A69002BA5FD-Kaehler:GBMMFA:01},
  PUBLISHER = {Morgan Kaufmann},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We present a muscle model and methods for muscle construction<br>that allow to easily create animatable facial models from given<br>face geometry. Using our editing tool, one can interactively<br>specify coarse outlines of the muscles, which are then<br>automatically created to fit the face geometry. <br><br>Our muscle model incorporates different types of muscles and the<br>effects of bulging and intertwining muscle fibers. The influence<br>of muscle contraction onto the skin is simulated using a<br>mass-spring system that connects the skull, muscle, and skin<br>layers of our model.},
  BOOKTITLE = {Proceedings Graphics Interface 2001 (GI 2001)},
  EDITOR    = {Watson, Benjamin and Buchanan, John W.},
  PAGES     = {37--46},
  ADDRESS   = {Ottawa, Canada},
}
Endnote
%0 Conference Proceedings %A K&#228;hler, Kolja %A Haber, J&#246;rg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Geometry-based Muscle Modeling for Facial Animation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3297-0 %F EDOC: 520196 %U http://www.mpi-sb.mpg.de/resources/FAM/publ/gi2001.pdf %F OTHER: Local-ID: C125675300671F7B-735EF964CEA6408FC1256A69002BA5FD-Kaehler:GBMMFA:01 %R 10.20380/GI2001.05 %D 2001 %B Graphics Interface Conference 2001 %Z date of event: 2001-06-07 - 2001-06-09 %C Ottawa, Canada %X We present a muscle model and methods for muscle construction<br>that allow to easily create animatable facial models from given<br>face geometry. Using our editing tool, one can interactively<br>specify coarse outlines of the muscles, which are then<br>automatically created to fit the face geometry. <br><br>Our muscle model incorporates different types of muscles and the<br>effects of bulging and intertwining muscle fibers. The influence<br>of muscle contraction onto the skin is simulated using a<br>mass-spring system that connects the skull, muscle, and skin<br>layers of our model. %B Proceedings Graphics Interface 2001 %E Watson, Benjamin; Buchanan, John W. %P 37 - 46 %I Morgan Kaufmann %@ 0-9688808-0-0
Kähler, K., Rössl, C., Schneider, R., Vorsatz, J., and Seidel, H.-P. 2001c. Efficient Processing of Large 3D Meshes. Proceedings of the International Conference on Shape Modeling and Applications, IEEE.
Export
BibTeX
@inproceedings{Kahler-et-al_SMA01,
  TITLE     = {Efficient Processing of Large {3D} Meshes},
  AUTHOR    = {K{\"a}hler, Kolja and R{\"o}ssl, Christian and Schneider, Robert and Vorsatz, Jens and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-0853-7},
  DOI       = {10.1109/SMA.2001.923394},
  LOCALID   = {Local-ID: C125675300671F7B-B2C81E4478C51B59C1256ABC00295899-SMI2001},
  PUBLISHER = {IEEE},
  YEAR      = {2001},
  DATE      = {2001},
  BOOKTITLE = {Proceedings of the International Conference on Shape Modeling and Applications},
  EDITOR    = {Pasko, Alexander and Spagnuolo, Michaela},
  PAGES     = {228--237},
  ADDRESS   = {Genova, Italy},
}
Endnote
%0 Conference Proceedings %A K&#228;hler, Kolja %A R&#246;ssl, Christian %A Schneider, Robert %A Vorsatz, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Processing of Large 3D Meshes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3282-F %F EDOC: 520233 %F OTHER: Local-ID: C125675300671F7B-B2C81E4478C51B59C1256ABC00295899-SMI2001 %R 10.1109/SMA.2001.923394 %D 2001 %B 2001 International Conference on Shape Modeling and Applications %Z date of event: 2001-05-07 - 2001-05-11 %C Genova, Italy %B Proceedings of the International Conference on Shape Modeling and Applications %E Pasko, Alexander; Spagnuolo, Michaela %P 228 - 237 %I IEEE %@ 0-7695-0853-7
Haber, J., Myszkowski, K., Yamauchi, H., and Seidel, H.-P. 2001a. Perceptually Guided Corrective Splatting. Computer Graphics Forum, Blackwell.
Abstract
One of the basic difficulties with interactive walkthroughs is the high<br> quality rendering of object surfaces with non-diffuse light scattering<br> characteristics. Since full ray tracing at interactive rates is usually<br> impossible, we render a precomputed global illumination solution using<br> graphics hardware and use remaining computational power to correct the<br> appearance of non-diffuse objects on-the-fly. The question arises, how to<br> obtain the best image quality as perceived by a human observer within a<br> limited amount of time for each frame. We address this problem by<br> enforcing corrective computation for those non-diffuse objects that are<br> selected using a computational model of visual attention. We consider both<br> the saliency- and task-driven selection of those objects and benefit<br> from the fact that shading artifacts of ``unattended'' objects are likely<br> to remain unnoticed. We use a hierarchical image-space sampling scheme to<br> control ray tracing and splat the generated point samples. The resulting<br> image converges progressively to a ray traced solution if the viewing<br> parameters remain unchanged. Moreover, we use a sample cache to enhance<br> visual appearance if the time budget for correction has been too low for<br> some frame. We check the validity of the cached samples using a <br> novel criterion suited for non-diffuse surfaces and reproject valid<br> samples into the current view.
Export
BibTeX
@inproceedings{Haber-et-al_Eurograph.01,
  TITLE     = {Perceptually Guided Corrective Splatting},
  AUTHOR    = {Haber, J{\"o}rg and Myszkowski, Karol and Yamauchi, Hitoshi and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/1467-8659.00507},
  LOCALID   = {Local-ID: C125675300671F7B-3992DB8541113439C1256A72003B9C5A-Haber:2001:PGCS},
  PUBLISHER = {Blackwell},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {One of the basic difficulties with interactive walkthroughs is the high<br> quality rendering of object surfaces with non-diffuse light scattering<br> characteristics. Since full ray tracing at interactive rates is usually<br> impossible, we render a precomputed global illumination solution using<br> graphics hardware and use remaining computational power to correct the<br> appearance of non-diffuse objects on-the-fly. The question arises, how to<br> obtain the best image quality as perceived by a human observer within a<br> limited amount of time for each frame. We address this problem by<br> enforcing corrective computation for those non-diffuse objects that are<br> selected using a computational model of visual attention. We consider both<br> the saliency- and task-driven selection of those objects and benefit<br> from the fact that shading artifacts of ``unattended'' objects are likely<br> to remain unnoticed. We use a hierarchical image-space sampling scheme to<br> control ray tracing and splat the generated point samples. The resulting<br> image converges progressively to a ray traced solution if the viewing<br> parameters remain unchanged. Moreover, we use a sample cache to enhance<br> visual appearance if the time budget for correction has been too low for<br> some frame. We check the validity of the cached samples using a <br> novel criterion suited for non-diffuse surfaces and reproject valid<br> samples into the current view.},
  BOOKTITLE = {Proceedings of the Eurographics Conference 2001},
  EDITOR    = {Chalmers, Alan and Rhyne, Theresa-Marie},
  PAGES     = {142--153},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {20},
  ISSUE     = {3},
  ADDRESS   = {Manchester, UK},
  internal-note = {Review: removed duplicated PUBLISHER field (was {Blackwell} and {Blackwell-Wiley}); kept {Blackwell} to match the citation record. Repeated fields are rejected/warned by Biber.},
}
Endnote
%0 Conference Proceedings %A Haber, J&#246;rg %A Myszkowski, Karol %A Yamauchi, Hitoshi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Guided Corrective Splatting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32C0-1 %F EDOC: 520198 %F OTHER: Local-ID: C125675300671F7B-3992DB8541113439C1256A72003B9C5A-Haber:2001:PGCS %R 10.1111/1467-8659.00507 %D 2001 %B Eurographics Conference 2001 %Z date of event: 2001 - %C Manchester, UK %X One of the basic difficulties with interactive walkthroughs is the high<br> quality rendering of object surfaces with non-diffuse light scattering<br> characteristics. Since full ray tracing at interactive rates is usually<br> impossible, we render a precomputed global illumination solution using<br> graphics hardware and use remaining computational power to correct the<br> appearance of non-diffuse objects on-the-fly. The question arises, how to<br> obtain the best image quality as perceived by a human observer within a<br> limited amount of time for each frame. We address this problem by<br> enforcing corrective computation for those non-diffuse objects that are<br> selected using a computational model of visual attention. We consider both<br> the saliency- and task-driven selection of those objects and benefit<br> from the fact that shading artifacts of ``unattended'' objects are likely<br> to remain unnoticed. We use a hierarchical image-space sampling scheme to<br> control ray tracing and splat the generated point samples. The resulting<br> image converges progressively to a ray traced solution if the viewing<br> parameters remain unchanged. Moreover, we use a sample cache to enhance<br> visual appearance if the time budget for correction has been too low for<br> some frame. 
We check the validity of the cached samples using a <br> novel criterion suited for non-diffuse surfaces and reproject valid<br> samples into the current view. %B Proceedings of the Eurographics Conference 2001 %E Chalmers, Alan; Rhyne, Theresa-Marie %P 142 - 153 %I Blackwell %J Computer Graphics Forum %V 20 %N 3 %I Blackwell-Wiley %@ false
Haber, J., Kähler, K., Albrecht, I., Yamauchi, H., and Seidel, H.-P. 2001b. Face to Face: From Real Humans to Realistic Facial Animation. Proceedings of the 3rd Israel-Korea Binational Conference on Geometrical Modeling and Computer Graphics, Kyung Moon.
Abstract
We present a system for photo-realistic facial modeling and animation, which includes several tools that facilitate necessary tasks such as mesh processing, texture registration, and assembling of facial components. The resulting head model reflects the anatomical structure of the human head including skull, skin, and muscles. Semiautomatic generation of high-quality models from scan data for physics-based animation becomes possible with little effort. A state-of-the-art speech synchronization technique is integrated into our system, resulting in realistic speech animations that can be rendered at real-time frame rates on current PC hardware.
Export
BibTeX
@inproceedings{Haber:2001:F2F,
  TITLE     = {Face to Face: From Real Humans to Realistic Facial Animation},
  AUTHOR    = {Haber, J{\"o}rg and K{\"a}hler, Kolja and Albrecht, Irene and Yamauchi, Hitoshi and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {89-7282-527-1},
  LOCALID   = {Local-ID: C125675300671F7B-8EC90DA876F6AE9DC1256B050066828B-Haber:2001:F2F},
  PUBLISHER = {Kyung Moon},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We present a system for photo-realistic facial modeling and animation, which includes several tools that facilitate necessary tasks such as mesh processing, texture registration, and assembling of facial components. The resulting head model reflects the anatomical structure of the human head including skull, skin, and muscles. Semiautomatic generation of high-quality models from scan data for physics-based animation becomes possible with little effort. A state-of-the-art speech synchronization technique is integrated into our system, resulting in realistic speech animations that can be rendered at real-time frame rates on current PC hardware.},
  BOOKTITLE = {Proceedings of the 3rd Israel-Korea Binational Conference on Geometrical Modeling and Computer Graphics},
  PAGES     = {73--82},
  internal-note = {Review: YEAR corrected from 2005 to 2001 -- it contradicted DATE = {2001}, the surrounding 2001 publication list, and the Endnote record (%D 2001).},
}
Endnote
%0 Conference Proceedings %A Haber, J&#246;rg %A K&#228;hler, Kolja %A Albrecht, Irene %A Yamauchi, Hitoshi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Face to Face: From Real Humans to Realistic Facial Animation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-328C-C %F EDOC: 520240 %F OTHER: Local-ID: C125675300671F7B-8EC90DA876F6AE9DC1256B050066828B-Haber:2001:F2F %I Kyung Moon %D 2001 %B 3rd Israel-Korea Binational Conference on Geometrical Modeling and Computer Graphics %Z date of event: 2001-10-26 - 2001-10-26 %C Seoul, Korea %X We present a system for photo-realistic facial modeling and animation, which includes several tools that facilitate necessary tasks such as mesh processing, texture registration, and assembling of facial components. The resulting head model reflects the anatomical structure of the human head including skull, skin, and muscles. Semiautomatic generation of high-quality models from scan data for physics-based animation becomes possible with little effort. A state-of-the-art speech synchronization technique is integrated into our system, resulting in realistic speech animations that can be rendered at real-time frame rates on current PC hardware. %B Proceedings of the 3rd Israel-Korea Binational Conference on Geometrical Modeling and Computer Graphics %P 73 - 82 %I Kyung Moon %@ 89-7282-527-1
Haber, J., Zeilfelder, F., Davydov, O., and Seidel, H.-P. 2001c. Smooth Approximation and Rendering of Large Scattered Data Sets. Proceedings of the 2001 IEEE Conference on Visualization, IEEE.
Abstract
We present an efficient method to automatically compute a smooth<br> approximation of large functional scattered data sets given over<br> arbitrarily shaped planar domains. Our approach is based on the<br> construction of a $C^1$-continuous bivariate cubic spline and our method<br> offers optimal approximation order. Both local<br> variation and non-uniform distribution of the data are taken into account<br> by using local polynomial least squares approximations of varying degree.<br> Since we only need to solve small linear systems and no triangulation of<br> the scattered data points is required, the overall complexity of the<br> algorithm is linear in the total number of points. Numerical examples<br> dealing with several real world scattered data sets with up to millions of<br> points demonstrate the efficiency of our method. The resulting spline<br> surface is of high visual quality and can be efficiently evaluated for<br> rendering and modeling. In our implementation we achieve real-time frame<br> rates for typical fly-through sequences and interactive frame rates for<br> recomputing and rendering a locally modified spline surface.
Export
BibTeX
@inproceedings{Haber-et-al_VIS01,
  TITLE     = {Smooth Approximation and Rendering of Large Scattered Data Sets},
  AUTHOR    = {Haber, J{\"o}rg and Zeilfelder, Frank and Davydov, Oleg and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7803-7201-8},
  DOI       = {10.1109/VISUAL.2001.964530},
  LOCALID   = {Local-ID: C125675300671F7B-C54DCD759D580F67C1256A72003C0824-Haber:2001:SARLSDS},
  PUBLISHER = {IEEE},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We present an efficient method to automatically compute a smooth<br> approximation of large functional scattered data sets given over<br> arbitrarily shaped planar domains. Our approach is based on the<br> construction of a $C^1$-continuous bivariate cubic spline and our method<br> offers optimal approximation order. Both local<br> variation and non-uniform distribution of the data are taken into account<br> by using local polynomial least squares approximations of varying degree.<br> Since we only need to solve small linear systems and no triangulation of<br> the scattered data points is required, the overall complexity of the<br> algorithm is linear in the total number of points. Numerical examples<br> dealing with several real world scattered data sets with up to millions of<br> points demonstrate the efficiency of our method. The resulting spline<br> surface is of high visual quality and can be efficiently evaluated for<br> rendering and modeling. In our implementation we achieve real-time frame<br> rates for typical fly-through sequences and interactive frame rates for<br> recomputing and rendering a locally modified spline surface.},
  BOOKTITLE = {Proceedings of the 2001 IEEE Conference on Visualization},
  EDITOR    = {Ertl, Thomas and Joy, Ken and Varshney, Amitabh},
  PAGES     = {341--347, 571},
  ADDRESS   = {San Diego, CA, USA},
  internal-note = {Review: PAGES separator normalized from {341--347;571} to the conventional comma form for non-contiguous page lists.},
}
Endnote
%0 Conference Proceedings %A Haber, J&#246;rg %A Zeilfelder, Frank %A Davydov, Oleg %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Smooth Approximation and Rendering of Large Scattered Data Sets : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32CC-9 %F EDOC: 520199 %F OTHER: Local-ID: C125675300671F7B-C54DCD759D580F67C1256A72003C0824-Haber:2001:SARLSDS %R 10.1109/VISUAL.2001.964530 %D 2001 %B 2001 IEEE Conference on Visualization %Z date of event: 2001-10-21 - 2001-10-26 %C San Diego, CA, USA %X We present an efficient method to automatically compute a smooth<br> approximation of large functional scattered data sets given over<br> arbitrarily shaped planar domains. Our approach is based on the<br> construction of a $C^1$-continuous bivariate cubic spline and our method<br> offers optimal approximation order. Both local<br> variation and non-uniform distribution of the data are taken into account<br> by using local polynomial least squares approximations of varying degree.<br> Since we only need to solve small linear systems and no triangulation of<br> the scattered data points is required, the overall complexity of the<br> algorithm is linear in the total number of points. Numerical examples<br> dealing with several real world scattered data sets with up to millions of<br> points demonstrate the efficiency of our method. The resulting spline<br> surface is of high visual quality and can be efficiently evaluated for<br> rendering and modeling. In our implementation we achieve real-time frame<br> rates for typical fly-through sequences and interactive frame rates for<br> recomputing and rendering a locally modified spline surface. %B Proceedings of the 2001 IEEE Conference on Visualization %E Ertl, Thomas; Joy, Ken; Varshney, Amitabh %P 341 - 347;571 %I IEEE %@ 0-7803-7201-8
Goesele, M., Heidrich, W., and Seidel, H.-P. 2001a. Entropy-Based Dark Frame Subtraction. Proceedings of PICS 2001: Image Processing, Image Quality, Image Capture, Systems Conference, IS&T.
Abstract
Noise due to dark current is a serious limitation for taking<br>long exposure time images with a CCD digital camera. Current<br>solutions have serious drawbacks: interpolation of pixels with high<br>dark current leads to smoothing effects or other artifacts --<br>especially if a large number of pixels are corrupted. Due to the<br>exponential temperature dependence of the dark current, dark frame<br>subtraction works best for temperature controlled high end CCD imaging<br>systems.<br><br>On the physical level, two independent signals (charge generated by<br>photons hitting the CCD and by the dark current) are added. Due to its<br>random distribution, adding (or subtracting) the dark current noise<br>signal increases the entropy of the resulting image. The entropy is<br>minimal if the dark current signal is not present at all.<br><br>A dark frame is a good representation of the dark current noise. As<br>the generated dark current depends on the temperature equally for all<br>pixels, a noisy image can be cleaned by the subtraction of a scaled<br>dark frame. The scaling factor can be determined in an optimization<br>step which tries to minimize the entropy of the cleaned image.<br><br>We implemented a software system that effectively removes dark current<br>noise even from highly corrupted images. The resulting images contain<br>almost no visible artifacts since only the noise signal is removed. This<br>extends the range of usable exposure times of digital cameras without<br>temperature control systems by about one to two orders of magnitude.
Export
BibTeX
@inproceedings{Goesele-et-al_PICS01,
  TITLE     = {Entropy-Based Dark Frame Subtraction},
  AUTHOR    = {Goesele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-89208-232-1},
  LOCALID   = {Local-ID: C125675300671F7B-F3BC118F7C160FC0C12569DE0053741B-Goesele:2001:EBD},
  PUBLISHER = {IS\&T},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {Noise due to dark current is a serious limitation for taking<br>long exposure time images with a CCD digital camera. Current<br>solutions have serious drawbacks: interpolation of pixels with high<br>dark current leads to smoothing effects or other artifacts --<br>especially if a large number of pixels are corrupted. Due to the<br>exponential temperature dependence of the dark current, dark frame<br>subtraction works best for temperature controlled high end CCD imaging<br>systems.<br><br>On the physical level, two independent signals (charge generated by<br>photons hitting the CCD and by the dark current) are added. Due to its<br>random distribution, adding (or subtracting) the dark current noise<br>signal increases the entropy of the resulting image. The entropy is<br>minimal if the dark current signal is not present at all.<br><br>A dark frame is a good representation of the dark current noise. As<br>the generated dark current depends on the temperature equally for all<br>pixels, a noisy image can be cleaned by the subtraction of a scaled<br>dark frame. The scaling factor can be determined in an optimization<br>step which tries to minimize the entropy of the cleaned image.<br><br>We implemented a software system that effectively removes dark current<br>noise even from highly corrupted images. The resulting images contain<br>almost no visible artifacts since only the noise signal is removed. This<br>extends the range of usable exposure times of digital cameras without<br>temperature control systems by about one to two orders of magnitude.},
  BOOKTITLE = {Proceedings of PICS 2001: Image Processing, Image Quality, Image Capture, Systems Conference},
  PAGES     = {293--298},
  ADDRESS   = {Montreal, Canada},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Entropy-Based Dark Frame Subtraction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3287-5 %F EDOC: 520163 %F OTHER: Local-ID: C125675300671F7B-F3BC118F7C160FC0C12569DE0053741B-Goesele:2001:EBD %D 2001 %B Image Processing, Image Quality, Image Capture, Systems Conference 2001 %Z date of event: 2001-04-22 - 2001-04-25 %C Montreal, Canada %X Noise due to dark current is a serious limitation for taking<br>long exposure time images with a CCD digital camera. Current<br>solutions have serious drawbacks: interpolation of pixels with high<br>dark current leads to smoothing effects or other artifacts --<br>especially if a large number of pixels are corrupted. Due to the<br>exponential temperature dependence of the dark current, dark frame<br>subtraction works best for temperature controlled high end CCD imaging<br>systems.<br><br>On the physical level, two independent signals (charge generated by<br>photons hitting the CCD and by the dark current) are added. Due to its<br>random distribution, adding (or subtracting) the dark current noise<br>signal increases the entropy of the resulting image. The entropy is<br>minimal if the dark current signal is not present at all.<br><br>A dark frame is a good representation of the dark current noise. As<br>the generated dark current depends on the temperature equally for all<br>pixels, a noisy image can be cleaned by the subtraction of a scaled<br>dark frame. The scaling factor can be determined in an optimization<br>step which tries to minimize the entropy of the cleaned image.<br><br>We implemented a software system that effectively removes dark current<br>noise even from highly corrupted images. 
The resulting images contain<br>almost no visible artifacts since only the noise signal is removed. This<br>extends the range of usable exposure times of digital cameras without<br>temperature control systems by about one to two orders of magnitude. %B Proceedings of PICS 2001: Image Processing, Image Quality, Image Capture, Systems Conference %P 293 - 298 %I IS&T %@ 0-89208-232-1
Goesele, M., Heidrich, W., and Seidel, H.-P. 2001b. Color Calibrated High Dynamic Range Imaging with ICC Profiles. Proceedings of the 9th Color Imaging Conference (CIC 2001), Society for Imaging Science and Technology.
Abstract
High dynamic range (HDR) imaging has become a powerful tool in<br>computer graphics, and is being applied to scenarios like<br>simulation of different film responses, motion blur, and<br>image-based illumination. The HDR images for these applications<br>are typically generated by combining the information from multiple<br>photographs taken at different exposure settings.<br><br>Unfortunately, the color calibration of these images has so far<br>been limited to very simplistic approaches such as a simple white<br>balance algorithm. More sophisticated methods used for<br>device-independent color representations are not easily applicable<br>because they inherently assume a limited dynamic range. In this<br>paper, we introduce a novel approach for constructing HDR images<br>directly from low dynamic range images that were calibrated using<br>an ICC input profile.
Export
BibTeX
@inproceedings{Goesele-et-al_CIC01,
  TITLE     = {Color Calibrated High Dynamic Range Imaging with {ICC} Profiles},
  AUTHOR    = {Goesele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-89208-235-6},
  DOI       = {10.2352/CIC.2001.9.1.art00052},
  LOCALID   = {Local-ID: C125675300671F7B-1218A9170C98921DC1256A8D0042885A-Goesele:2001:CCH},
  PUBLISHER = {Society for Imaging Science and Technology},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {High dynamic range (HDR) imaging has become a powerful tool in<br>computer graphics, and is being applied to scenarios like<br>simulation of different film responses, motion blur, and<br>image-based illumination. The HDR images for these applications<br>are typically generated by combining the information from multiple<br>photographs taken at different exposure settings.<br><br>Unfortunately, the color calibration of these images has so far<br>been limited to very simplistic approaches such as a simple white<br>balance algorithm. More sophisticated methods used for<br>device-independent color representations are not easily applicable<br>because they inherently assume a limited dynamic range. In this<br>paper, we introduce a novel approach for constructing HDR images<br>directly from low dynamic range images that were calibrated using<br>an ICC input profile.},
  BOOKTITLE = {Proceedings of the 9th Color Imaging Conference (CIC 2001)},
  PAGES     = {286--290},
  ADDRESS   = {Scottsdale, USA},
}
Endnote
%0 Conference Proceedings %A Goesele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Color Calibrated High Dynamic Range Imaging with ICC Profiles : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3274-F %F EDOC: 520219 %F OTHER: Local-ID: C125675300671F7B-1218A9170C98921DC1256A8D0042885A-Goesele:2001:CCH %R 10.2352/CIC.2001.9.1.art00052 %D 2001 %B The 9th Color Imaging Conference %Z date of event: 2001-11-06 - 2001-11-09 %C Scottsdale, USA %X High dynamic range (HDR) imaging has become a powerful tool in<br>computer graphics, and is being applied to scenarios like<br>simulation of different film responses, motion blur, and<br>image-based illumination. The HDR images for these applications<br>are typically generated by combining the information from multiple<br>photographs taken at different exposure settings.<br><br>Unfortunately, the color calibration of these images has so far<br>been limited to very simplistic approaches such as a simple white<br>balance algorithm. More sophisticated methods used for<br>device-independent color representations are not easily applicable<br>because they inherently assume a limited dynamic range. In this<br>paper, we introduce a novel approach for constructing HDR images<br>directly from low dynamic range images that were calibrated using<br>an ICC input profile. %B Proceedings of the 9th Color Imaging Conference %P 286 - 290 %I Society for Imaging Science and Technology %@ 0-89208-235-6
Ertl, T., Girod, B., Greiner, G., Niemann, H., and Seidel, H.-P., eds. 2001. Vision, Modeling and Visualization 2001 (VMV-2001). Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@proceedings{VMV2001,
  TITLE     = {Vision, Modeling and Visualization 2001 (VMV-2001)},
  EDITOR    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-028-9},
  LOCALID   = {Local-ID: C125675300671F7B-59DD35FDBE934AD8C1256ACD004CB0B2-VMV2001},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2001},
  DATE      = {2001},
  PAGES     = {532},
  ADDRESS   = {Stuttgart, Germany},
}
Endnote
%0 Conference Proceedings %E Ertl, Thomas %E Girod, Bernd %E Greiner, G&#252;nther %E Niemann, Heinrich %E Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Vision, Modeling and Visualization 2001 (VMV-2001) : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32DA-9 %F EDOC: 520238 %F OTHER: Local-ID: C125675300671F7B-59DD35FDBE934AD8C1256ACD004CB0B2-VMV2001 %@ 3-89838-028-9 %I Akademische Verlagsgesellschaft Aka %D 2001 %B 6th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2001-11-21 - 2001-11-23 %D 2001 %C Stuttgart, Germany %P 532
Daubert, K., Lensch, H.P.A., Heidrich, W., and Seidel, H.-P. 2001a. Efficient Cloth Modeling and Rendering. Rendering Techniques 2001 (EGSR 2001), Springer.
Abstract
Realistic modeling and high-performance rendering of cloth and<br>clothing is a challenging problem. Often these materials are seen<br>at distances where individual stitches and knits can be made out<br>and need to be accounted for. Modeling of the geometry at this<br>level of detail fails due to sheer complexity, while simple<br>texture mapping techniques do not produce the desired quality.<br> <br>In this paper, we describe an efficient and realistic approach<br>that takes into account view-dependent effects such as small<br>displacements causing occlusion and shadows, as well as<br>illumination effects. The method is efficient in terms of memory<br>consumption, and uses a combination of hardware and software<br>rendering to achieve high performance. It is conceivable that<br>future graphics hardware will be flexible enough for full<br>hardware rendering of the proposed method.
Export
BibTeX
@inproceedings{Daubert-et-al_EGSR01,
  TITLE     = {Efficient Cloth Modeling and Rendering},
  AUTHOR    = {Daubert, Katja and Lensch, Hendrik P. A. and Heidrich, Wolfgang and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0946-2767},
  ISBN      = {978-3-211-83709-2},
  DOI       = {10.1007/978-3-7091-6242-2_6},
  LOCALID   = {Local-ID: C125675300671F7B-FBC662E15414073CC1256A7D00509B96-Daubert2001},
  PUBLISHER = {Springer},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {Realistic modeling and high-performance rendering of cloth and<br>clothing is a challenging problem. Often these materials are seen<br>at distances where individual stitches and knits can be made out<br>and need to be accounted for. Modeling of the geometry at this<br>level of detail fails due to sheer complexity, while simple<br>texture mapping techniques do not produce the desired quality.<br> <br>In this paper, we describe an efficient and realistic approach<br>that takes into account view-dependent effects such as small<br>displacements causing occlusion and shadows, as well as<br>illumination effects. The method is efficient in terms of memory<br>consumption, and uses a combination of hardware and software<br>rendering to achieve high performance. It is conceivable that<br>future graphics hardware will be flexible enough for full<br>hardware rendering of the proposed method.},
  BOOKTITLE = {Rendering Techniques 2001 (EGSR 2001)},
  EDITOR    = {Myszkowski, Karol},
  PAGES     = {63--70},
  SERIES    = {Eurographics},
  ADDRESS   = {London, UK},
}
Endnote
%0 Conference Proceedings %A Daubert, Katja %A Lensch, Hendrik P. A. %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Cloth Modeling and Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-327C-0 %F EDOC: 520206 %F OTHER: Local-ID: C125675300671F7B-FBC662E15414073CC1256A7D00509B96-Daubert2001 %R 10.1007/978-3-7091-6242-2_6 %D 2001 %B 12th Eurographics Workshop on Rendering Techniques %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %X Realistic modeling and high-performance rendering of cloth and<br>clothing is a challenging problem. Often these materials are seen<br>at distances where individual stitches and knits can be made out<br>and need to be accounted for. Modeling of the geometry at this<br>level of detail fails due to sheer complexity, while simple<br>texture mapping techniques do not produce the desired quality.<br> <br>In this paper, we describe an efficient and realistic approach<br>that takes into account view-dependent effects such as small<br>displacements causing occlusion and shadows, as well as<br>illumination effects. The method is efficient in terms of memory<br>consumption, and uses a combination of hardware and software<br>rendering to achieve high performance. It is conceivable that<br>future graphics hardware will be flexible enough for full<br>hardware rendering of the proposed method. %B Rendering Techniques 2001 %E Myszkowski, Karol %P 63 - 70 %I Springer %@ 978-3-211-83709-2 %B Eurographics %@ false
Daubert, K., Heidrich, W., Kautz, J., Dischler, J.-M., and Seidel, H.-P. 2001b. Efficient light transport using precomputed visibility. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Visibility computations are the most time-consuming part of global illumination algorithms. The cost is amplified by the fact that quite often identical or similar information is recomputed multiple times. In particular this is the case when multiple images of the same scene are to be generated under varying lighting conditions and/or viewpoints. But even for a single image with static illumination, the computations could be accelerated by reusing visibility information for many different light paths. In this report we describe a general method of precomputing, storing, and reusing visibility information for light transport in a number of different types of scenes. In particular, we consider general parametric surfaces, triangle meshes without a global parameterization, and participating media. We also reorder the light transport in such a way that the visibility information is accessed in structured memory access patterns. This yields a method that is well suited for SIMD-style parallelization of the light transport, and can efficiently be implemented both in software and using graphics hardware. We finally demonstrate applications of the method to highly efficient precomputation of BRDFs, bidirectional texture functions, light fields, as well as near-interactive volume lighting.
Export
BibTeX
@techreport{DaubertHeidrichKautzDischlerSeidel2001,
  TITLE       = {Efficient light transport using precomputed visibility},
  AUTHOR      = {Daubert, Katja and Heidrich, Wolfgang and Kautz, Jan and Dischler, Jean-Michel and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-003},
  NUMBER      = {MPI-I-2001-4-003},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2001},
  DATE        = {2001},
  ABSTRACT    = {Visibility computations are the most time-consuming part of global illumination algorithms. The cost is amplified by the fact that quite often identical or similar information is recomputed multiple times. In particular this is the case when multiple images of the same scene are to be generated under varying lighting conditions and/or viewpoints. But even for a single image with static illumination, the computations could be accelerated by reusing visibility information for many different light paths. In this report we describe a general method of precomputing, storing, and reusing visibility information for light transport in a number of different types of scenes. In particular, we consider general parametric surfaces, triangle meshes without a global parameterization, and participating media. We also reorder the light transport in such a way that the visibility information is accessed in structured memory access patterns. This yields a method that is well suited for SIMD-style parallelization of the light transport, and can efficiently be implemented both in software and using graphics hardware. We finally demonstrate applications of the method to highly efficient precomputation of BRDFs, bidirectional texture functions, light fields, as well as near-interactive volume lighting.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Daubert, Katja %A Heidrich, Wolfgang %A Kautz, Jan %A Dischler, Jean-Michel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient light transport using precomputed visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6CA7-F %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-003 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2001 %P 32 p. %X Visibility computations are the most time-consuming part of global illumination algorithms. The cost is amplified by the fact that quite often identical or similar information is recomputed multiple times. In particular this is the case when multiple images of the same scene are to be generated under varying lighting conditions and/or viewpoints. But even for a single image with static illumination, the computations could be accelerated by reusing visibility information for many different light paths. In this report we describe a general method of precomputing, storing, and reusing visibility information for light transport in a number of different types of scenes. In particular, we consider general parametric surfaces, triangle meshes without a global parameterization, and participating media. We also reorder the light transport in such a way that the visibility information is accessed in structured memory access patterns. This yields a method that is well suited for SIMD-style parallelization of the light transport, and can efficiently be implemented both in software and using graphics hardware. We finally demonstrate applications of the method to highly efficient precomputation of BRDFs, bidirectional texture functions, light fields, as well as near-interactive volume lighting. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Choi, S.W. and Seidel, H.-P. 2001a. One-sided Stability of Medial Axis Transform. Pattern Recognition (DAGM 2001), Springer.
Abstract
Medial axis transform (MAT)<br> is very sensitive to the noise,<br> in the sense that, even if a shape<br> is perturbed only slightly,<br> the Hausdorff distance between the<br> MATs of the original shape and the perturbed one<br> may be large.<br>But it turns out that MAT is stable,<br> if we view this phenomenon with the one-sided Hausdorff<br> distance, rather than with the two-sided Hausdorff distance.<br>In this paper, we show that,<br> if the original domain is weakly injective,<br> which means that the MAT of the domain has no end point which<br> is the center of an inscribed circle osculating the boundary at<br> only one point,<br> the one-sided Hausdorff distance of the original domain's MAT<br> with respect to that of the perturbed one <br> is bounded linearly<br> with the Hausdorff distance of the perturbation.<br>We also show by example that the linearity of this bound<br> cannot be achieved for the domains which are not weakly injective.<br>In particular, these results<br> apply to <br> the domains with the sharp corners,<br> which were excluded in the past.<br>One consequence of these results is that<br> we can clarify theoretically<br> the notion of extracting ``the essential part of the MAT'',<br> which is the heart of the existing pruning methods.
Export
BibTeX
@inproceedings{Choi-Seidel_DAGM01,
  TITLE     = {One-sided Stability of Medial Axis Transform},
  AUTHOR    = {Choi, Sung Woo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-540-42596-0},
  DOI       = {10.1007/3-540-45404-7_18},
  LOCALID   = {Local-ID: C125675300671F7B-09AA63AD30214C0EC1256A8D003B0425-ChoiSeidel2001b},
  PUBLISHER = {Springer},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {Medial axis transform (MAT)<br> is very sensitive to the noise,<br> in the sense that, even if a shape<br> is perturbed only slightly,<br> the Hausdorff distance between the<br> MATs of the original shape and the perturbed one<br> may be large.<br>But it turns out that MAT is stable,<br> if we view this phenomenon with the one-sided Hausdorff<br> distance, rather than with the two-sided Hausdorff distance.<br>In this paper, we show that,<br> if the original domain is weakly injective,<br> which means that the MAT of the domain has no end point which<br> is the center of an inscribed circle osculating the boundary at<br> only one point,<br> the one-sided Hausdorff distance of the original domain's MAT<br> with respect to that of the perturbed one <br> is bounded linearly<br> with the Hausdorff distance of the perturbation.<br>We also show by example that the linearity of this bound<br> cannot be achieved for the domains which are not weakly injective.<br>In particular, these results<br> apply to <br> the domains with the sharp corners,<br> which were excluded in the past.<br>One consequence of these results is that<br> we can clarify theoretically<br> the notion of extracting ``the essential part of the MAT'',<br> which is the heart of the existing pruning methods.},
  BOOKTITLE = {Pattern Recognition (DAGM 2001)},
  EDITOR    = {Radig, Bernd and Florczyk, Stefan},
  PAGES     = {132--139},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {2191},
  ADDRESS   = {Munich, Germany},
}
Endnote
%0 Conference Proceedings %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T One-sided Stability of Medial Axis Transform : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32B6-A %F EDOC: 520218 %F OTHER: Local-ID: C125675300671F7B-09AA63AD30214C0EC1256A8D003B0425-ChoiSeidel2001b %R 10.1007/3-540-45404-7_18 %D 2001 %B 23rd DAGM Symposium on Pattern Recognition %Z date of event: 2001-09-12 - 2001-09-14 %C Munich, Germany %X Medial axis transform (MAT)<br> is very sensitive to the noise,<br> in the sense that, even if a shape<br> is perturbed only slightly,<br> the Hausdorff distance between the<br> MATs of the original shape and the perturbed one<br> may be large.<br>But it turns out that MAT is stable,<br> if we view this phenomenon with the one-sided Hausdorff<br> distance, rather than with the two-sided Hausdorff distance.<br>In this paper, we show that,<br> if the original domain is weakly injective,<br> which means that the MAT of the domain has no end point which<br> is the center of an inscribed circle osculating the boundary at<br> only one point,<br> the one-sided Hausdorff distance of the original domain's MAT<br> with respect to that of the perturbed one <br> is bounded linearly<br> with the Hausdorff distance of the perturbation.<br>We also show by example that the linearity of this bound<br> cannot be achieved for the domains which are not weakly injective.<br>In particular, these results<br> apply to <br> the domains with the sharp corners,<br> which were excluded in the past.<br>One consequence of these results is that<br> we can clarify theoretically<br> the notion of extracting ``the essential part of the MAT'',<br> which is the heart of the existing pruning methods. 
%B Pattern Recognition %E Radig, Bernd; Florczyk, Stefan %P 132 - 139 %I Springer %@ 978-3-540-42596-0 %B Lecture Notes in Computer Science %N 2191 %U https://rdcu.be/dyNkt
Choi, S.W. and Seidel, H.-P. 2001b. One-sided Stability of MAT and its Applications. Vision, Modeling and Visualization 2001 (VMV 2001), Akademische Verlagsgesellschaft Aka.
Abstract
Although useful in many applications,<br> the medial axis transform (MAT) has a few fit-falls,<br> one of which is its extreme sensitivity to the boundary<br> perturbation.<br>In this paper, we first summarizes the previous attempts to <br> get around this by bounding the one-sided Hausdorff distance<br> of the MAT with respect to the boundary perturbation.<br>We illustrate these results and their optimality with various examples.<br>Finally, we suggest an application of them in pruning.<br>In particular, we discuss the advantage of the results for the domains<br> which are not weakly injective, over those for the weakly injective<br> ones.
Export
BibTeX
@inproceedings{Choi-Seidel_VMV01,
  TITLE     = {One-sided Stability of {MAT} and its Applications},
  AUTHOR    = {Choi, Sung Woo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-028-9},
  LOCALID   = {Local-ID: C125675300671F7B-897A41C25B5031ADC1256A8D004B1373-ChoiSeidel2001c},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {Although useful in many applications,<br> the medial axis transform (MAT) has a few fit-falls,<br> one of which is its extreme sensitivity to the boundary<br> perturbation.<br>In this paper, we first summarizes the previous attempts to <br> get around this by bounding the one-sided Hausdorff distance<br> of the MAT with respect to the boundary perturbation.<br>We illustrate these results and their optimality with various examples.<br>Finally, we suggest an application of them in pruning.<br>In particular, we discuss the advantage of the results for the domains<br> which are not weakly injective, over those for the weakly injective<br> ones.},
  BOOKTITLE = {Vision, Modeling and Visualization 2001 (VMV 2001)},
  EDITOR    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter},
  PAGES     = {291--298},
  ADDRESS   = {Stuttgart, Germany},
}
Endnote
%0 Conference Proceedings %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T One-sided Stability of MAT and its Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32B4-E %F EDOC: 520220 %F OTHER: Local-ID: C125675300671F7B-897A41C25B5031ADC1256A8D004B1373-ChoiSeidel2001c %D 2001 %B 6th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2001-11-21 - 2001-11-23 %C Stuttgart, Germany %X Although useful in many applications,<br> the medial axis transform (MAT) has a few fit-falls,<br> one of which is its extreme sensitivity to the boundary<br> perturbation.<br>In this paper, we first summarizes the previous attempts to <br> get around this by bounding the one-sided Hausdorff distance<br> of the MAT with respect to the boundary perturbation.<br>We illustrate these results and their optimality with various examples.<br>Finally, we suggest an application of them in pruning.<br>In particular, we discuss the advantage of the results for the domains<br> which are not weakly injective, over those for the weakly injective<br> ones. %B Vision, Modeling and Visualization 2001 %E Ertl, Thomas; Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter %P 291 - 298 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-028-9
Choi, S.W. and Seidel, H.-P. 2001c. Linear one-sided stability of MAT for weakly injective domain. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Medial axis transform (MAT) is very sensitive to the noise, in the sense that, even if a shape is perturbed only slightly, the Hausdorff distance between the MATs of the original shape and the perturbed one may be large. But it turns out that MAT is stable, if we view this phenomenon with the one-sided Hausdorff distance, rather than with the two-sided Hausdorff distance. In this paper, we show that, if the original domain is weakly injective, which means that the MAT of the domain has no end point which is the center of an inscribed circle osculating the boundary at only one point, the one-sided Hausdorff distance of the original domain's MAT with respect to that of the perturbed one is bounded linearly with the Hausdorff distance of the perturbation. We also show by example that the linearity of this bound cannot be achieved for the domains which are not weakly injective. In particular, these results apply to the domains with the sharp corners, which were excluded in the past. One consequence of these results is that we can clarify theoretically the notion of extracting ``the essential part of the MAT'', which is the heart of the existing pruning methods.
Export
BibTeX
@techreport{ChoiSeidel2001,
  TITLE       = {Linear one-sided stability of {MAT} for weakly injective domain},
  AUTHOR      = {Choi, Sung Woo and Seidel, Hans-Peter},
  LANGUAGE    = {eng},
  URL         = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-004},
  NUMBER      = {MPI-I-2001-4-004},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS     = {Saarbr{\"u}cken},
  YEAR        = {2001},
  DATE        = {2001},
  ABSTRACT    = {Medial axis transform (MAT) is very sensitive to the noise, in the sense that, even if a shape is perturbed only slightly, the Hausdorff distance between the MATs of the original shape and the perturbed one may be large. But it turns out that MAT is stable, if we view this phenomenon with the one-sided Hausdorff distance, rather than with the two-sided Hausdorff distance. In this paper, we show that, if the original domain is weakly injective, which means that the MAT of the domain has no end point which is the center of an inscribed circle osculating the boundary at only one point, the one-sided Hausdorff distance of the original domain's MAT with respect to that of the perturbed one is bounded linearly with the Hausdorff distance of the perturbation. We also show by example that the linearity of this bound cannot be achieved for the domains which are not weakly injective. In particular, these results apply to the domains with the sharp corners, which were excluded in the past. One consequence of these results is that we can clarify theoretically the notion of extracting ``the essential part of the MAT'', which is the heart of the existing pruning methods.},
  TYPE        = {Research Report / Max-Planck-Institut f{\"u}r Informatik},
}
Endnote
%0 Report %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Linear one-sided stability of MAT for weakly injective domain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6CA4-6 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2001-4-004 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2001 %P 18 p. %X Medial axis transform (MAT) is very sensitive to the noise, in the sense that, even if a shape is perturbed only slightly, the Hausdorff distance between the MATs of the original shape and the perturbed one may be large. But it turns out that MAT is stable, if we view this phenomenon with the one-sided Hausdorff distance, rather than with the two-sided Hausdorff distance. In this paper, we show that, if the original domain is weakly injective, which means that the MAT of the domain has no end point which is the center of an inscribed circle osculating the boundary at only one point, the one-sided Hausdorff distance of the original domain's MAT with respect to that of the perturbed one is bounded linearly with the Hausdorff distance of the perturbation. We also show by example that the linearity of this bound cannot be achieved for the domains which are not weakly injective. In particular, these results apply to the domains with the sharp corners, which were excluded in the past. One consequence of these results is that we can clarify theoretically the notion of extracting ``the essential part of the MAT'', which is the heart of the existing pruning methods. %B Research Report / Max-Planck-Institut f&#252;r Informatik
Choi, S.W. and Seidel, H.-P. 2001d. Hyperbolic Hausdorff Distance for Medial Axis Transform. Graphical Models63, 5.
Export
BibTeX
@article{Choi-Seidel_Graph.Mod.01,
  TITLE     = {Hyperbolic Hausdorff Distance for Medial Axis Transform},
  AUTHOR    = {Choi, Sung Woo and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1524-0703},
  DOI       = {10.1006/gmod.2001.0556},
  LOCALID   = {Local-ID: C125675300671F7B-C2A9EEBCB8B76810C1256AB70043138A-ChoiSeidel2001d},
  PUBLISHER = {Academic Press},
  ADDRESS   = {San Diego, Calif.},
  YEAR      = {2001},
  DATE      = {2001},
  JOURNAL   = {Graphical Models},
  VOLUME    = {63},
  NUMBER    = {5},
  PAGES     = {369--384},
}
Endnote
%0 Journal Article %A Choi, Sung Woo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hyperbolic Hausdorff Distance for Medial Axis Transform : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-329E-2 %F EDOC: 520230 %F OTHER: Local-ID: C125675300671F7B-C2A9EEBCB8B76810C1256AB70043138A-ChoiSeidel2001d %R 10.1006/gmod.2001.0556 %D 2001 %* Review method: peer-reviewed %J Graphical Models %V 63 %N 5 %& 369 %P 369 - 384 %I Academic Press %C San Diego, Calif. %@ false
Brabec, S. and Seidel, H.-P. 2001. Hardware-accelerated Rendering of Antialiased Shadows with Shadow Maps. Proceedings Computer Graphics International 2001 (CGI 2001), IEEE.
Abstract
We present a hardware-accelerated method for rendering high quality,<br>antialiased shadows using the shadow map approach. Instead of relying <br>on dedicated hardware support for shadow map filtering, we propose<br>a general rendering algorithm that can be used on most graphics <br>workstations. The filtering method softens shadow boundaries by using<br>a technique called percentage closer filtering which is commonly used<br>in software renderers, e.g ray tracing. In this paper we describe how <br>the software algorithm can be efficiently mapped to hardware. In order<br>to achieve real-time or at least interactive frame rates we also<br>propose a slightly modified shadow filtering method that saves<br>valuable hardware resources while still achieving good image quality.
Export
BibTeX
@inproceedings{Brabec-Seidel_CGI01,
  TITLE     = {Hardware-accelerated Rendering of Antialiased Shadows with Shadow Maps},
  AUTHOR    = {Brabec, Stefan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {0-7695-1007-8},
  DOI       = {10.1109/CGI.2001.934676},
  LOCALID   = {Local-ID: C125675300671F7B-C08E6B900A29B2E2C1256A8800345851-Brabec2000:HRASSM},
  PUBLISHER = {IEEE},
  YEAR      = {2001},
  DATE      = {2001},
  ABSTRACT  = {We present a hardware-accelerated method for rendering high quality,<br>antialiased shadows using the shadow map approach. Instead of relying <br>on dedicated hardware support for shadow map filtering, we propose<br>a general rendering algorithm that can be used on most graphics <br>workstations. The filtering method softens shadow boundaries by using<br>a technique called percentage closer filtering which is commonly used<br>in software renderers, e.g ray tracing. In this paper we describe how <br>the software algorithm can be efficiently mapped to hardware. In order<br>to achieve real-time or at least interactive frame rates we also<br>propose a slightly modified shadow filtering method that saves<br>valuable hardware resources while still achieving good image quality.},
  BOOKTITLE = {Proceedings Computer Graphics International 2001 (CGI 2001)},
  EDITOR    = {Ip, Horace Ho-Shing and Magnenat-Thalmann, Nadia and Lau, Rynson W. H. and Chua, Tat-Seng},
  PAGES     = {209--214},
  ADDRESS   = {Hong Kong, China},
}
Endnote
%0 Conference Proceedings %A Brabec, Stefan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hardware-accelerated Rendering of Antialiased Shadows with Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-329C-6 %F EDOC: 520215 %F OTHER: Local-ID: C125675300671F7B-C08E6B900A29B2E2C1256A8800345851-Brabec2000:HRASSM %R 10.1109/CGI.2001.934676 %D 2001 %B Computer Graphics International Conference 2001 %Z date of event: 2001-07-06 - 2001-07-06 %C Hong Kong, China %X We present a hardware-accelerated method for rendering high quality,<br>antialiased shadows using the shadow map approach. Instead of relying <br>on dedicated hardware support for shadow map filtering, we propose<br>a general rendering algorithm that can be used on most graphics <br>workstations. The filtering method softens shadow boundaries by using<br>a technique called percentage closer filtering which is commonly used<br>in software renderers, e.g ray tracing. In this paper we describe how <br>the software algorithm can be efficiently mapped to hardware. In order<br>to achieve real-time or at least interactive frame rates we also<br>propose a slightly modified shadow filtering method that saves<br>valuable hardware resources while still achieving good image quality. %B Proceedings Computer Graphics International 2001 %E Ip, Horace Ho-Shing; Magnenat-Thalmann, Nadia; Lau, Rynson W. H.; Chua, Tat-Seng %P 209 - 214 %I IEEE %@ 0-7695-1007-8
Bekaert, P. and Seidel, H.-P. 2001. A Theoretical Comparison of Monte Carlo Radiosity Algorithms. Vision, Modeling and Visualization 2001 (VMV 2001), Akademische Verlagsgesellschaft Aka.
Export
BibTeX
@inproceedings{Bekaert-Seidel_VMV01,
  TITLE     = {A Theoretical Comparison of Monte Carlo Radiosity Algorithms},
  AUTHOR    = {Bekaert, Philippe and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {3-89838-028-9},
  URL       = {http://wwwvis.informatik.uni-stuttgart.de/vmv01/dl/papers/19.pdf},
  LOCALID   = {Local-ID: C125675300671F7B-F81B5D6C536C0EE1C1256A9D004C992F-Bekaert2001_vmv},
  PUBLISHER = {Akademische Verlagsgesellschaft Aka},
  YEAR      = {2001},
  DATE      = {2001},
  BOOKTITLE = {Vision, Modeling and Visualization 2001 (VMV 2001)},
  EDITOR    = {Ertl, Thomas and Girod, Bernd and Greiner, G{\"u}nther and Niemann, Heinrich and Seidel, Hans-Peter},
  PAGES     = {257--264},
  ADDRESS   = {Stuttgart, Germany},
}
Endnote
%0 Conference Proceedings %A Bekaert, Philippe %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Theoretical Comparison of Monte Carlo Radiosity Algorithms : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-326C-3 %F EDOC: 520226 %U http://wwwvis.informatik.uni-stuttgart.de/vmv01/dl/papers/19.pdf %F OTHER: Local-ID: C125675300671F7B-F81B5D6C536C0EE1C1256A9D004C992F-Bekaert2001_vmv %D 2001 %B 6th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2001-11-21 - 2001-11-23 %C Stuttgart, Germany %B Vision, Modeling and Visualization 2001 %E Ertl, Thomas; Girod, Bernd; Greiner, G&#252;nther; Niemann, Heinrich; Seidel, Hans-Peter %P 257 - 264 %I Akademische Verlagsgesellschaft Aka %@ 3-89838-028-9