@article{sun2023dynamic,
  title     = {Dynamic human systems risk prognosis and control of lifting operations during prefabricated building construction},
  author    = {Sun, Zhe and Zhu, Zhufu and Xiong, Ruoxin and Tang, Pingbo and Liu, Zhansheng},
  journal   = {Developments in the Built Environment},
  pages     = {100143},
  year      = {2023},
  publisher = {Elsevier},
  doi       = {10.1016/j.dibe.2023.100143},
}
2021
Machine learning using synthetic images for detecting dust emissions on construction sites
Ruoxin Xiong and Pingbo Tang
Smart and Sustainable Built Environment. Invited paper from the CONVR 2020, 2021
@article{xiong2021machine,
  title     = {Machine learning using synthetic images for detecting dust emissions on construction sites},
  author    = {Xiong, Ruoxin and Tang, Pingbo},
  journal   = {Smart and Sustainable Built Environment},
  volume    = {10},
  number    = {3},
  pages     = {487--503},
  year      = {2021},
  publisher = {Emerald Publishing Limited},
  doi       = {10.1108/SASBE-04-2021-0066},
}
2019
Onsite video mining for construction hazards identification with visual relationships
Ruoxin Xiong, Yuanbin Song, Heng Li, and Yuxuan Wang
Widely-used video monitoring systems provide a large corpus of unstructured image data on construction sites. Although previously developed vision-based approaches can be used for hazards recognition in terms of detecting dangerous objects or unsafe operations, such detection capacity is often limited due to the lack of a semantic representation of visual relationships between/among the components or crews in the workplace. Accordingly, the formal representation of textual criteria for checking improper relationships should also be improved. In this regard, an Automated Hazards Identification System (AHIS) is developed to evaluate the operation descriptions generated from site videos against the safety guidelines extracted from the textual documents with the assistance of the ontology of construction safety. In particular, visual relationships are modeled as a connector between site components/operators. Moreover, both visual descriptions of site operations and semantic representations of safety guidelines are coded in the three-tuple format and then automatically converted into Horn clauses for reasoning out the potential risks. A preliminary implementation of the system was tested on two separate onsite video clips. The results showed that two types of crucial hazards, i.e., failure to wear a helmet and walking beneath the crane, were successfully identified with three rules from Safety Handbook for Construction Site Workers. In addition, the high-performance results of Recall@50 and Recall@100 demonstrated that the proposed visual relationship detection method is promising in enriching the semantic representation of operation facts extracted from site videos, which may lead to better automation in the detection of construction hazards.
@article{xiong2019onsite,
  title     = {Onsite video mining for construction hazards identification with visual relationships},
  author    = {Xiong, Ruoxin and Song, Yuanbin and Li, Heng and Wang, Yuxuan},
  journal   = {Advanced Engineering Informatics},
  volume    = {42},
  pages     = {100966},
  year      = {2019},
  publisher = {Elsevier},
  doi       = {10.1016/j.aei.2019.100966},
}