@@ -10,8 +10,6 @@ params.dbsnp = false
 params.known_indels1 = false
 params.known_indels2 = false
 params.intervals = false
-params.hs_metrics_target_coverage = false
-params.hs_metrics_per_base_coverage = false
 params.skip_bqsr = false
 params.skip_realignment = false
 params.skip_deduplication = false
@@ -136,7 +134,7 @@ if (!params.skip_deduplication) {
     cpus "${params.mark_duplicates_cpus}"
     memory "${params.mark_duplicates_memory}"
     tag "${name}"
-    publishDir "${publish_dir}/${name}/metrics", mode: "copy", pattern: "*.dedup_metrics"
+    publishDir "${publish_dir}/${name}/metrics", mode: "copy", pattern: "*.dedup_metrics.txt"
 
     input:
     set name, bam_name, type, file(bam) from prepared_bams
@@ -145,10 +143,10 @@ if (!params.skip_deduplication) {
     set val(name), val(bam_name), val(type),
         file("${bam.baseName}.dedup.bam"), file("${bam.baseName}.dedup.bam.bai") into deduplicated_bams,
         deduplicated_bams_for_metrics, deduplicated_bams_for_hs_metrics
-    file("${bam.baseName}.dedup_metrics") optional true into deduplication_metrics
+    file("${bam.baseName}.dedup_metrics.txt") optional true
 
     script:
-    dedup_metrics = params.skip_metrics ? "" : "--metrics-file ${bam.baseName}.dedup_metrics"
+    dedup_metrics = params.skip_metrics ? "" : "--metrics-file ${bam.baseName}.dedup_metrics.txt"
     remove_duplicates = params.remove_duplicates ? "--remove-all-duplicates true" : "--remove-all-duplicates false"
     """
     mkdir tmp
@@ -157,9 +155,7 @@ if (!params.skip_deduplication) {
     --java-options '-Xmx${params.mark_duplicates_memory} -Djava.io.tmpdir=tmp' \
     --input ${bam} \
     --output ${bam.baseName}.dedup.bam \
-    --conf 'spark.executor.cores=${task.cpus}' \
-    ${remove_duplicates} \
-    ${dedup_metrics}
+    --conf 'spark.executor.cores=${task.cpus}' ${remove_duplicates} ${dedup_metrics}
     """
   }
 }
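With the three trailing flags collapsed onto one line, the MarkDuplicatesSpark template still expands to the same command. As a hedged illustration only (the sample name, memory value, and CPU count below are assumed, not taken from the diff), with skip_metrics and remove_duplicates both left at false the script block would render roughly as:

    # assumed example values: sample1.bam, mark_duplicates_memory=8g, task.cpus=2
    mkdir tmp
    gatk MarkDuplicatesSpark \
    --java-options '-Xmx8g -Djava.io.tmpdir=tmp' \
    --input sample1.bam \
    --output sample1.dedup.bam \
    --conf 'spark.executor.cores=2' --remove-all-duplicates false --metrics-file sample1.dedup_metrics.txt

The renamed .dedup_metrics.txt suffix is what allows the updated publishDir pattern above to match the metrics file.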
@@ -202,18 +198,11 @@ if (!params.skip_metrics) {
     set name, bam_name, type, file(bam), file(bai) from deduplicated_bams_for_hs_metrics
 
     output:
-    file("*_metrics") optional true into txt_hs_metrics
-    file("*.pdf") optional true into pdf_hs_metrics
-    file(params.hs_metrics_target_coverage) optional true into target_hs_metrics
-    file(params.hs_metrics_per_base_coverage) optional true into per_base_hs_metrics
+    file("*_metrics") optional true
+    file("*.pdf") optional true
+    file("${bam.baseName}.hs_metrics.txt")
 
     script:
-    hs_metrics_target_coverage = params.hs_metrics_target_coverage ?
-        "--PER_TARGET_COVERAGE ${params.hs_metrics_target_coverage} --REFERENCE_SEQUENCE ${params.reference}" :
-        ""
-    hs_metrics_per_base_coverage = params.hs_metrics_per_base_coverage ?
-        "--PER_BASE_COVERAGE ${params.hs_metrics_per_base_coverage}" :
-        ""
     minimum_base_quality = params.collect_hs_metrics_min_base_quality ?
         "--MINIMUM_BASE_QUALITY ${params.collect_hs_metrics_min_base_quality}" : ""
     minimum_mapping_quality = params.collect_hs_metrics_min_mapping_quality ?
@@ -224,10 +213,10 @@ if (!params.skip_metrics) {
     gatk CollectHsMetrics \
     --java-options '-Xmx${params.metrics_memory} -Djava.io.tmpdir=tmp' \
     --INPUT ${bam} \
-    --OUTPUT ${bam.baseName} \
+    --OUTPUT ${bam.baseName}.hs_metrics.txt \
     --TARGET_INTERVALS ${params.intervals} \
     --BAIT_INTERVALS ${params.intervals} \
-    ${hs_metrics_target_coverage} ${hs_metrics_per_base_coverage} ${minimum_base_quality} ${minimum_mapping_quality}
+    ${minimum_base_quality} ${minimum_mapping_quality}
     """
   }
 }
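The CollectHsMetrics process drops the per-target and per-base coverage branches, so the report always lands in a predictable ${bam.baseName}.hs_metrics.txt. A sketch of the rendered command under assumed example values (sample1.dedup.bam as input, metrics_memory=8g, targets.interval_list as the intervals file, collect_hs_metrics_min_base_quality=10, the mapping-quality threshold unset; none of these values come from the diff):

    # assumed example values, for illustration only
    mkdir tmp
    gatk CollectHsMetrics \
    --java-options '-Xmx8g -Djava.io.tmpdir=tmp' \
    --INPUT sample1.dedup.bam \
    --OUTPUT sample1.dedup.hs_metrics.txt \
    --TARGET_INTERVALS targets.interval_list \
    --BAIT_INTERVALS targets.interval_list \
    --MINIMUM_BASE_QUALITY 10

Because the output file name is now deterministic, the output: block can declare file("${bam.baseName}.hs_metrics.txt") as a required rather than optional output.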