@article {Gong650, author = {Kuang Gong and Jaewon Yang and Kyungsang Kim and Georges El Fakhri and Youngho Seo and Quanzheng Li}, title = {Attenuation Correction of PET/MR Using Deep Neural Network Based on Dixon and ZTE MR Images}, volume = {59}, number = {supplement 1}, pages = {650--650}, year = {2018}, publisher = {Society of Nuclear Medicine}, abstract = {Objectives: To obtain meaningful quantitative results from PET images, attenuation correction is necessary during image reconstruction. For PET/MR hybrid systems, attenuation correction is challenging because MR images do not reflect attenuation coefficients directly. To address this issue, we present a deep neural network that derives continuous attenuation coefficients for brain PET/MR imaging from both Dixon and zero echo time (ZTE) MR images. To make full use of the multiple MR images, a new network structure was proposed that extracts features from the Dixon and ZTE images independently in early layers and combines them in later layers. Methods: Fourteen patients (age 60.3 {\textpm} 10.2 y; 6 female, 8 male) who underwent a whole-body PET/CT scan followed by an additional PET/MRI scan without a second tracer administration were included in this study. Both Dixon and ZTE images were acquired during the PET/MRI scan, with an average scan duration of 259.3 {\textpm} 94.2 s. The average administered FDG dose was 275.9 {\textpm} 60.6 MBq. When preparing the network training pairs, we first registered the CT and ZTE images to the Dixon MR images through rigid transformation using the ANTs software. Random rotation and permutation were then applied to the training pairs to avoid over-fitting. Quantitative analysis was based on seven-fold cross-validation. The proposed network structure, shown in Fig. 1(A), extracts features from the different inputs independently and combines them at a later stage. To make full use of the axial information and reduce axial aliasing artifacts, nine neighboring axial slices were stacked as nine input channels. The network was implemented in TensorFlow 1.1.0, with a batch size of 30 and 1000 training epochs. This method is denoted as DixonZTE-CNN. The atlas method based on the Dixon MR image (Dixon-Atlas) and the segmentation method based on the ZTE MR image (ZTE-Seg) were adopted as baseline methods. Regional and global relative PET errors were used to compare the performance of the different methods. Results: The proposed DixonZTE-CNN generates pseudo-CT images with more structural detail (Fig. 1). Quantitative analysis (Table 1) shows that DixonZTE-CNN achieved the lowest whole-brain error (2.31\% {\textpm} 0.60\%) compared with Dixon-Atlas (5.50\% {\textpm} 1.46\%) and ZTE-Seg (3.76\% {\textpm} 1.43\%). Regional analysis of the cerebellum, brain lobes, and inner brain regions also demonstrates the improvement achieved by DixonZTE-CNN. Conclusion: We have proposed a deep neural network that generates a continuous attenuation map for brain PET/MR imaging based on both Dixon and ZTE images.
Analysis using real data sets shows that the neural network produces smaller quantification errors in the PET images than the standard methods, and thus provides a promising new approach for the attenuation correction of PET/MR. [Table 1: Relative PET error for different regions]}, issn = {0161-5505}, URL = {https://jnm.snmjournals.org/content/59/supplement_1/650}, eprint = {https://jnm.snmjournals.org/content}, journal = {Journal of Nuclear Medicine} }
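
The abstract describes a two-branch network that processes the Dixon and ZTE inputs independently in early layers, merges the features in later layers, and stacks nine neighboring axial slices as nine input channels. The sketch below only illustrates that idea; it is not the authors' implementation. The abstract reports TensorFlow 1.1.0, whereas this sketch uses the current tf.keras API, and the layer counts, filter widths, 256x256 slice size, and loss function are assumptions.

```python
# Minimal sketch of the dual-branch structure described in the abstract:
# independent early-layer feature extraction for Dixon and ZTE, merged at a
# later stage to regress a continuous pseudo-CT slice. Hyperparameters other
# than batch size and epoch count are illustrative assumptions.
import tensorflow as tf
from tensorflow.keras import layers, Model

SLICES = 9          # nine neighboring axial slices stacked as input channels
H = W = 256         # assumed in-plane matrix size

def conv_branch(x, name):
    """Independent early-layer feature extractor for one MR image type."""
    for i, filters in enumerate((32, 64)):          # assumed depth and width
        x = layers.Conv2D(filters, 3, padding="same", activation="relu",
                          name=f"{name}_conv{i}")(x)
    return x

dixon_in = layers.Input(shape=(H, W, SLICES), name="dixon")
zte_in = layers.Input(shape=(H, W, SLICES), name="zte")

# Extract features from each MR input independently ...
dixon_feat = conv_branch(dixon_in, "dixon")
zte_feat = conv_branch(zte_in, "zte")

# ... then combine them at a later stage and regress the pseudo-CT slice.
merged = layers.Concatenate(name="merge")([dixon_feat, zte_feat])
merged = layers.Conv2D(64, 3, padding="same", activation="relu")(merged)
pseudo_ct = layers.Conv2D(1, 1, padding="same", name="pseudo_ct")(merged)

model = Model(inputs=[dixon_in, zte_in], outputs=pseudo_ct)
model.compile(optimizer="adam", loss="mae")   # loss choice is an assumption

# Training settings quoted in the abstract: batch size 30, 1000 epochs.
# model.fit([dixon_stack, zte_stack], ct_slices, batch_size=30, epochs=1000)
```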
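
The quantitative analysis is based on seven-fold cross-validation over the 14 patients, i.e. two held-out patients per fold. A minimal sketch of such a split, with hypothetical helper calls, is shown below.

```python
# Seven-fold cross-validation over 14 patients (2 held out per fold).
# Patient identifiers and the commented-out helpers are placeholders.
import numpy as np
from sklearn.model_selection import KFold

patient_ids = np.arange(14)                 # 14 patients in the study
kfold = KFold(n_splits=7, shuffle=True, random_state=0)

for fold, (train_idx, test_idx) in enumerate(kfold.split(patient_ids)):
    train_patients = patient_ids[train_idx]   # 12 patients for training
    test_patients = patient_ids[test_idx]     # 2 patients for evaluation
    print(f"fold {fold}: train={train_patients}, test={test_patients}")
    # model = build_and_train(train_patients)              # hypothetical
    # evaluate_relative_pet_error(model, test_patients)    # hypothetical
```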
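
The abstract reports regional and global relative PET errors (e.g., a whole-brain error of 2.31% ± 0.60% for DixonZTE-CNN) but does not give the exact formula. One plausible definition, assumed here, is the percent difference of mean regional uptake between PET reconstructed with the pseudo-CT attenuation map and PET reconstructed with the reference CT-based map.

```python
# Assumed definition of the regional relative PET error in Table 1:
# percent difference of mean activity within a region mask, taking the
# CT-based reconstruction as the reference.
import numpy as np

def relative_pet_error(pet_pseudo_ct: np.ndarray,
                       pet_reference_ct: np.ndarray,
                       region_mask: np.ndarray) -> float:
    """Percent relative error of mean uptake inside region_mask."""
    ref_mean = pet_reference_ct[region_mask].mean()
    test_mean = pet_pseudo_ct[region_mask].mean()
    return 100.0 * abs(test_mean - ref_mean) / ref_mean

# Example (arrays are placeholders): whole-brain error for one patient.
# err = relative_pet_error(pet_cnn, pet_ctac, brain_mask)
```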