Skip to content

Commit

Permalink
Merge pull request #346 from donglihe-hub/new_attxml
Browse files Browse the repository at this point in the history
Refactored AttentionXML
  • Loading branch information
Eleven1Liu authored Apr 8, 2024
2 parents d9b2d8a + a6521f8 commit ab7685c
Show file tree
Hide file tree
Showing 9 changed files with 1,080 additions and 0 deletions.
41 changes: 41 additions & 0 deletions example_config/AmazonCat-13K/attentionxml.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# AttentionXML configuration for the AmazonCat-13K dataset.
data_name: AmazonCat-13K
training_file: data/AmazonCat-13K/train.txt
test_file: data/AmazonCat-13K/test.txt
# pretrained embeddings
embed_file: glove.840B.300d

# preprocessing
min_vocab_freq: 1
max_seq_length: 500

# label tree related parameters
cluster_size: 8
beam_width: 64

# dataloader
batch_size: 200
val_size: 4000
shuffle: true

# eval
eval_batch_size: 200
monitor_metrics: [P@1, P@3, P@5, nDCG@3, nDCG@5, RP@3, RP@5]
val_metric: nDCG@5

# train
seed: 1337
epochs: 10
optimizer: adam
learning_rate: 0.001
# early stopping
patience: 10

# model
model_name: AttentionXML
network_config:
  embed_dropout: 0.2
  post_encoder_dropout: 0.5
  rnn_dim: 1024
  rnn_layers: 1
  linear_size: [512, 256]
freeze_embed_training: false
41 changes: 41 additions & 0 deletions example_config/EUR-Lex/attentionxml.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# AttentionXML configuration for the EUR-Lex dataset.
data_name: EUR-Lex
training_file: data/EUR-Lex/train.txt
test_file: data/EUR-Lex/test.txt
# pretrained embeddings
embed_file: glove.840B.300d

# preprocessing
min_vocab_freq: 1
max_seq_length: 500

# label tree related parameters
cluster_size: 8
beam_width: 64

# dataloader
batch_size: 40
val_size: 200
shuffle: true

# eval
eval_batch_size: 40
monitor_metrics: [P@1, P@3, P@5, nDCG@3, nDCG@5, RP@3, RP@5]
val_metric: nDCG@5

# train
seed: 1337
epochs: 30
optimizer: adam
learning_rate: 0.001
# early stopping
patience: 30

# model
model_name: AttentionXML
network_config:
  embed_dropout: 0.2
  post_encoder_dropout: 0.5
  rnn_dim: 512
  rnn_layers: 1
  linear_size: [256]
# canonical lowercase boolean (was "True", which is not a YAML 1.2 core boolean)
freeze_embed_training: true
41 changes: 41 additions & 0 deletions example_config/Wiki10-31K/attentionxml.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# AttentionXML configuration for the Wiki10-31K dataset.
data_name: Wiki10-31K
training_file: data/Wiki10-31K/train.txt
test_file: data/Wiki10-31K/test.txt
# pretrained embeddings
embed_file: glove.840B.300d

# preprocessing
min_vocab_freq: 1
max_seq_length: 500

# label tree related parameters
cluster_size: 8
beam_width: 64

# dataloader
batch_size: 40
val_size: 200
shuffle: true

# eval
eval_batch_size: 40
monitor_metrics: [P@1, P@3, P@5, nDCG@3, nDCG@5, RP@3, RP@5]
val_metric: nDCG@5

# train
seed: 1337
epochs: 30
optimizer: adam
learning_rate: 0.001
# early stopping
patience: 30

# model
model_name: AttentionXML
network_config:
  embed_dropout: 0.2
  # NOTE(review): was "encoder_dropout"; renamed to match the other AttentionXML
  # configs (AmazonCat-13K, EUR-Lex), which use post_encoder_dropout — an
  # unrecognized key would silently leave this dropout at its default. Confirm
  # against the network's accepted parameters.
  post_encoder_dropout: 0.5
  rnn_dim: 512
  rnn_layers: 1
  linear_size: [256]
# canonical lowercase boolean (was "True", which is not a YAML 1.2 core boolean)
freeze_embed_training: true
Loading

0 comments on commit ab7685c

Please sign in to comment.