/aosp_15_r20/external/libopus/dnn/torch/lpcnet/utils/layers/pcm_embeddings.py
    39: def __init__(self, embed_dim=128, num_levels=256):  [argument]
    60: def __init__(self, embed_dim, num_levels=256):  [argument]

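The pcm_embeddings.py hits above define embeddings of quantized PCM levels (num_levels=256) into embed_dim-wide vectors. A minimal sketch of that pattern, assuming a plain PyTorch lookup table; the class name and body here are illustrative, the actual LPCNet module may add scaling or a fixed sinusoidal variant:

    import torch
    import torch.nn as nn

    class PCMEmbedding(nn.Module):
        # Hypothetical sketch: map PCM level indices (0..num_levels-1) to dense vectors.
        def __init__(self, embed_dim=128, num_levels=256):
            super().__init__()
            self.embed = nn.Embedding(num_levels, embed_dim)

        def forward(self, levels):
            # levels: LongTensor of quantized sample indices, any shape
            return self.embed(levels)  # output shape: levels.shape + (embed_dim,)

    emb = PCMEmbedding()
    out = emb(torch.randint(0, 256, (1, 16)))  # (1, 16, 128)
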
/aosp_15_r20/external/pytorch/benchmarks/transformer/attention_bias_benchmarks.py
    70: batch_size, q_sequence_length, kv_sequence_length, embed_dim, dtype, device  [argument]
    81: def __init__(self, num_heads, embed_dim, device=None, dtype=None):  [argument]

/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/transformer.cpp
    65: const int64_t embed_dim,  in norm()
    77: const int64_t embed_dim,  in transformer_encoder_layer_forward()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/attention.cpp
    189: const int64_t embed_dim,  in qkv_projection()
    265: const int64_t embed_dim,  in native_multi_head_attention_cpu()
    856: const int64_t embed_dim,  in triton_multi_head_attention()

/aosp_15_r20/external/executorch/exir/tests/transformer.py
    15: def __init__(self, embed_dim, num_heads=2):  [argument]

/aosp_15_r20/external/pytorch/torch/csrc/api/src/nn/options/activation.cpp
    25: int64_t embed_dim,  in MultiheadAttentionOptions()

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/activation.h
    826: MultiheadAttentionImpl(int64_t embed_dim, int64_t num_heads)  in MultiheadAttentionImpl()

/aosp_15_r20/external/pytorch/test/test_native_mha.py
    160: def __init__(self, embed_dim, num_heads, qkv, proj):  [argument]

/aosp_15_r20/external/pytorch/test/test_transformers.py
    902: embed_dim,  [argument]

/aosp_15_r20/external/pytorch/test/test_jit.py
    14992: def __init__(self, embed_dim, num_heads):  [argument]

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/activation.h
    673: const auto& embed_dim = query_sizes[2];  [variable]

/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/embedding_ops_test.py
    812: def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):  [argument]

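The TensorFlow test above builds sharded embedding weights parameterized by vocab_size, embed_dim and num_shards. A minimal sketch of the lookup pattern such a test exercises, assuming tf.nn.embedding_lookup over a list of shard tensors; the shard sizes and ids below are illustrative, not taken from the test:

    import tensorflow as tf

    vocab_size, embed_dim, num_shards = 4, 4, 2
    # Embedding table split row-wise across shards.
    shards = [tf.random.normal([vocab_size // num_shards, embed_dim])
              for _ in range(num_shards)]
    ids = tf.constant([0, 2, 3])
    vecs = tf.nn.embedding_lookup(shards, ids)  # shape: (3, embed_dim)
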
/aosp_15_r20/external/pytorch/test/cpp/api/sequential.cpp
    500: int64_t embed_dim = 8;  in TEST_F()  [local]

/aosp_15_r20/external/pytorch/torch/nn/modules/activation.py
    1042: embed_dim,  [argument]

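Most of the PyTorch hits above are constructor or kernel arguments of multi-head attention, where embed_dim equals the last dimension of the query/key/value tensors (that is where the query_sizes[2] line in the C++ functional header reads it from). A short usage sketch of the public module:

    import torch
    import torch.nn as nn

    embed_dim, num_heads = 8, 2
    mha = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)

    x = torch.randn(4, 10, embed_dim)      # (batch, seq_len, embed_dim)
    attn_out, attn_weights = mha(x, x, x)  # self-attention
    print(attn_out.shape)                  # torch.Size([4, 10, 8])
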
/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/cuda/attention.cu
    483: const int64_t embed_dim,  in native_multi_head_attention_cuda()

/aosp_15_r20/external/pytorch/test/inductor/test_cpu_repro.py
    295: embed_dim,  [argument]