11# ## input layers
22"""
3- scaled_rand([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
3+ scaled_rand([rng], [T], dims...;
44 scaling=0.1)
55
66Create and return a matrix with random values, uniformly distributed within
@@ -41,8 +41,8 @@ function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...;
4141end
4242
4343"""
44- weighted_init([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
45- scaling=0.1)
44+ weighted_init([rng], [T], dims...;
45+ scaling=0.1, return_sparse=true )
4646
4747Create and return a matrix representing a weighted input layer.
4848This initializer generates a weighted input matrix with random non-zero
@@ -57,6 +57,8 @@ elements distributed uniformly within the range [-`scaling`, `scaling`] [^Lu2017
5757 - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
5858 - `scaling`: The scaling factor for the weight distribution.
5959 Defaults to `0.1`.
60+ - `return_sparse`: flag for returning a `sparse` matrix.
61+ Default is `true`.
6062
6163# Examples
6264
@@ -77,7 +79,7 @@ julia> res_input = weighted_init(8, 3)
7779 Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102.
7880"""
7981function weighted_init (rng:: AbstractRNG , :: Type{T} , dims:: Integer... ;
80- scaling= T (0.1 )) where {T <: Number }
82+ scaling= T (0.1 ), return_sparse :: Bool = true ) where {T <: Number }
8183 approx_res_size, in_size = dims
8284 res_size = Int (floor (approx_res_size / in_size) * in_size)
8385 layer_matrix = DeviceAgnostic. zeros (rng, T, res_size, in_size)
@@ -88,11 +90,11 @@ function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
8890 T (0.5 )) .* (T (2 ) * scaling)
8991 end
9092
91- return layer_matrix
93+ return return_sparse ? sparse (layer_matrix) : layer_matrix
9294end
9395
9496"""
95- informed_init([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
97+ informed_init([rng], [T], dims...;
9698 scaling=0.1, model_in_size, gamma=0.5)
9799
98100Create an input layer for informed echo state networks [^Pathak2018].
@@ -152,7 +154,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
152154end
153155
154156"""
155- minimal_init([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
157+ minimal_init([rng], [T], dims...;
156158 sampling_type=:bernoulli, weight=0.1, irrational=pi, start=1, p=0.5)
157159
158160Create a layer matrix with uniform weights determined by `weight`. The sign difference
283285# ## reservoirs
284286
285287"""
286- rand_sparse([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
287- radius=1.0, sparsity=0.1, std=1.0)
288+ rand_sparse([rng], [T], dims...;
289+ radius=1.0, sparsity=0.1, std=1.0, return_sparse=true )
288290
289291Create and return a random sparse reservoir matrix.
290292The matrix will be of size specified by `dims`, with specified `sparsity`
@@ -301,6 +303,8 @@ and scaled spectral radius according to `radius`.
301303 Defaults to 1.0.
302304 - `sparsity`: The sparsity level of the reservoir matrix,
303305 controlling the fraction of zero elements. Defaults to 0.1.
306+ - `return_sparse`: flag for returning a `sparse` matrix.
307+ Default is `true`.
304308
305309# Examples
306310
@@ -315,20 +319,22 @@ julia> res_matrix = rand_sparse(5, 5; sparsity=0.5)
315319```
316320"""
function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...;
        radius=T(1.0), sparsity=T(0.1), std=T(1.0),
        return_sparse::Bool=true) where {T <: Number}
    # Flip the sparsity convention for consistency with current implementations:
    # NOTE(review): this assumes `sparse_init` treats `sparsity` as the fraction
    # of zeroed entries while this API exposes the fraction of nonzeros — confirm.
    lcl_sparsity = T(1) - sparsity
    reservoir_matrix = sparse_init(rng, T, dims...; sparsity=lcl_sparsity, std=std)
    # Rescale so the spectral radius (largest |eigenvalue|) equals `radius`.
    rho_w = maximum(abs.(eigvals(reservoir_matrix)))
    reservoir_matrix .*= radius / rho_w
    # If the draw produced no nonzero entries, rho_w == 0 and the rescale above
    # yields Infs. `any(isinf, ...)` finds ±Inf in one allocation-free pass,
    # instead of materializing `unique(...)` twice as before.
    if any(isinf, reservoir_matrix)
        error("Sparsity too low for size of the matrix. Increase res_size or increase sparsity")
    end

    return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end
328334
329335"""
330- delay_line([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
331- weight=0.1)
336+ delay_line([rng], [T], dims...;
337+ weight=0.1, return_sparse=true )
332338
333339Create and return a delay line reservoir matrix [^Rodan2010].
334340
@@ -341,6 +347,8 @@ Create and return a delay line reservoir matrix [^Rodan2010].
341347 - `dims`: Dimensions of the reservoir matrix.
342348 - `weight`: Determines the value of all connections in the reservoir.
343349 Default is 0.1.
350+ - `return_sparse`: flag for returning a `sparse` matrix.
351+ Default is `true`.
344352
345353# Examples
346354
@@ -366,7 +374,7 @@ julia> res_matrix = delay_line(5, 5; weight=1)
366374 IEEE transactions on neural networks 22.1 (2010): 131-144.
367375"""
368376function delay_line (rng:: AbstractRNG , :: Type{T} , dims:: Integer... ;
369- weight= T (0.1 )) where {T <: Number }
377+ weight= T (0.1 ), return_sparse :: Bool = true ) where {T <: Number }
370378 reservoir_matrix = DeviceAgnostic. zeros (rng, T, dims... )
371379 @assert length (dims) == 2 && dims[1 ] == dims[2 ] " The dimensions
372380 must define a square matrix (e.g., (100, 100))"
@@ -375,12 +383,12 @@ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...;
375383 reservoir_matrix[i + 1 , i] = weight
376384 end
377385
378- return reservoir_matrix
386+ return return_sparse ? sparse (reservoir_matrix) : reservoir_matrix
379387end
380388
381389"""
382- delay_line_backward([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
383- weight = 0.1, fb_weight = 0.2)
390+ delay_line_backward([rng], [T], dims...;
391+ weight = 0.1, fb_weight = 0.2, return_sparse=true )
384392
385393Create a delay line backward reservoir with the specified by `dims` and weights.
386394Creates a matrix with backward connections as described in [^Rodan2010].
@@ -396,6 +404,8 @@ Creates a matrix with backward connections as described in [^Rodan2010].
396404 forward connections in the reservoir. Default is 0.1
397405 - `fb_weight`: Determines the absolute value of backward connections
398406 in the reservoir. Default is 0.2
407+ - `return_sparse`: flag for returning a `sparse` matrix.
408+ Default is `true`.
399409
400410# Examples
401411
@@ -421,7 +431,7 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5)
421431 IEEE transactions on neural networks 22.1 (2010): 131-144.
422432"""
423433function delay_line_backward (rng:: AbstractRNG , :: Type{T} , dims:: Integer... ;
424- weight= T (0.1 ), fb_weight= T (0.2 )) where {T <: Number }
434+ weight= T (0.1 ), fb_weight= T (0.2 ), return_sparse :: Bool = true ) where {T <: Number }
425435 res_size = first (dims)
426436 reservoir_matrix = DeviceAgnostic. zeros (rng, T, dims... )
427437
@@ -430,12 +440,12 @@ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...;
430440 reservoir_matrix[i, i + 1 ] = fb_weight
431441 end
432442
433- return reservoir_matrix
443+ return return_sparse ? sparse (reservoir_matrix) : reservoir_matrix
434444end
435445
436446"""
437- cycle_jumps([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
438- cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3)
447+ cycle_jumps([rng], [T], dims...;
448+ cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3, return_sparse=true )
439449
440450Create a cycle jumps reservoir with the specified dimensions,
441451cycle weight, jump weight, and jump size.
@@ -453,6 +463,8 @@ cycle weight, jump weight, and jump size.
453463 Default is 0.1.
454464 - `jump_size`: The number of steps between jump connections.
455465 Default is 3.
466+ - `return_sparse`: flag for returning a `sparse` matrix.
467+ Default is `true`.
456468
457469# Examples
458470
@@ -479,7 +491,7 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size=2)
479491"""
480492function cycle_jumps (rng:: AbstractRNG , :: Type{T} , dims:: Integer... ;
481493 cycle_weight:: Number = T (0.1 ), jump_weight:: Number = T (0.1 ),
482- jump_size:: Int = 3 ) where {T <: Number }
494+ jump_size:: Int = 3 , return_sparse :: Bool = true ) where {T <: Number }
483495 res_size = first (dims)
484496 reservoir_matrix = DeviceAgnostic. zeros (rng, T, dims... )
485497
@@ -498,12 +510,12 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...;
498510 reservoir_matrix[tmp, i] = jump_weight
499511 end
500512
501- return reservoir_matrix
513+ return return_sparse ? sparse (reservoir_matrix) : reservoir_matrix
502514end
503515
504516"""
505- simple_cycle([rng::AbstractRNG=Utils.default_rng() ], [T=Float32 ], dims...;
506- weight = 0.1)
517+ simple_cycle([rng], [T], dims...;
518+ weight = 0.1, return_sparse=true )
507519
508520Create a simple cycle reservoir with the specified dimensions and weight.
509521
@@ -515,6 +527,8 @@ Create a simple cycle reservoir with the specified dimensions and weight.
515527 - `dims`: Dimensions of the reservoir matrix.
516528 - `weight`: Weight of the connections in the reservoir matrix.
517529 Default is 0.1.
530+ - `return_sparse`: flag for returning a `sparse` matrix.
531+ Default is `true`.
518532
519533# Examples
520534
@@ -540,20 +554,21 @@ julia> res_matrix = simple_cycle(5, 5; weight=11)
540554 IEEE transactions on neural networks 22.1 (2010): 131-144.
541555"""
function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
        weight=T(0.1), return_sparse::Bool=true) where {T <: Number}
    # Cycle topology: unit i feeds unit i + 1, and the last unit closes the
    # loop by feeding unit 1. Every connection carries the same `weight`.
    cycle_matrix = DeviceAgnostic.zeros(rng, T, dims...)
    n_units = dims[1]

    for col in 1:n_units
        row = col == n_units ? 1 : col + 1
        cycle_matrix[row, col] = weight
    end

    if return_sparse
        return sparse(cycle_matrix)
    else
        return cycle_matrix
    end
end
553567
554568"""
555- pseudo_svd([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
556- max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false)
569+ pseudo_svd([rng], [T], dims...;
570+ max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false,
571+ return_sparse=true)
557572
558573Returns an initializer to build a sparse reservoir matrix with the given
559574`sparsity` by using a pseudo-SVD approach as described in [^yang].
@@ -573,6 +588,8 @@ Returns an initializer to build a sparse reservoir matrix with the given
573588 creating the diagonal matrix. Default is `true`.
574589 - `reverse_sort`: A boolean indicating whether to reverse the sorted
575590 singular values. Default is `false`.
591+ - `return_sparse`: flag for returning a `sparse` matrix.
592+ Default is `true`.
576593
577594# Examples
578595
@@ -590,7 +607,7 @@ julia> res_matrix = pseudo_svd(5, 5)
590607"""
591608function pseudo_svd (rng:: AbstractRNG , :: Type{T} , dims:: Integer... ;
592609 max_value:: Number = T (1.0 ), sparsity:: Number = 0.1 , sorted:: Bool = true ,
593- reverse_sort:: Bool = false ) where {T <: Number }
610+ reverse_sort:: Bool = false , return_sparse :: Bool = true ) where {T <: Number }
594611 reservoir_matrix = create_diag (rng, T, dims[1 ],
595612 max_value;
596613 sorted= sorted,
@@ -605,7 +622,7 @@ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...;
605622 tmp_sparsity = get_sparsity (reservoir_matrix, dims[1 ])
606623 end
607624
608- return reservoir_matrix
625+ return return_sparse ? sparse (reservoir_matrix) : reservoir_matrix
609626end
610627
611628# hacky workaround for the moment
0 commit comments