From a1ec166ee46a4ec66610984ca7233aa03ea625a8 Mon Sep 17 00:00:00 2001
From: Mr-Neutr0n <64578610+Mr-Neutr0n@users.noreply.github.com>
Date: Thu, 12 Feb 2026 00:13:18 +0530
Subject: [PATCH] fix: use raise instead of return for exceptions and avoid mutable default args

- han/utils.py: change `return NotImplementedError` to `raise NotImplementedError`
- evolveGCN/train.py: change `return NotImplementedError` to `raise NotImplementedError`
- bgnn/BGNN.py: change `raise NotImplemented` to `raise NotImplementedError`
- jtnn/jtnn_dec.py: replace mutable default `clique=[]` with `None`
- jtnn/chemutils.py: replace mutable defaults `prev_nodes=[]`, `prev_amap=[]` with `None`

`return NotImplementedError(...)` silently returns the exception object
instead of raising it, so callers never see the error. `raise
NotImplemented(...)` raises a `TypeError` with a confusing message,
because `NotImplemented` is a built-in constant, not an exception class.
Mutable default arguments are shared across calls and can cause subtle
bugs when mutated.
---
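Note for reviewers (illustration only, not part of the diff): a minimal,
runnable sketch of the three bug patterns this patch fixes. The function
names `pick` and `collect` are made up for the demo:

    # `return` instead of `raise`: the call "succeeds" and hands back
    # the exception object, so callers never see an error.
    def pick(name):
        if name != "gcn":
            return NotImplementedError("unknown model {}".format(name))
        return "ok"

    print(pick("mlp"))  # prints the exception object; nothing is raised

    # `raise NotImplemented(...)`: NotImplemented is a constant, not an
    # exception class, so calling it raises an unrelated TypeError.
    try:
        raise NotImplemented("oops")
    except TypeError as e:
        print(e)  # 'NotImplementedType' object is not callable

    # Mutable default argument: one list shared across every call.
    def collect(x, acc=[]):
        acc.append(x)
        return acc

    print(collect(1))  # [1]
    print(collect(2))  # [1, 2] -- state leaked from the first call
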
 examples/pytorch/bgnn/BGNN.py           | 2 +-
 examples/pytorch/evolveGCN/train.py     | 2 +-
 examples/pytorch/han/utils.py           | 2 +-
 examples/pytorch/jtnn/jtnn/chemutils.py | 6 +++++-
 examples/pytorch/jtnn/jtnn/jtnn_dec.py  | 4 +++-
 5 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/examples/pytorch/bgnn/BGNN.py b/examples/pytorch/bgnn/BGNN.py
index 16f2dd21432d..27643d489f38 100644
--- a/examples/pytorch/bgnn/BGNN.py
+++ b/examples/pytorch/bgnn/BGNN.py
@@ -209,7 +209,7 @@ def train_model(self, model_in, target_labels, train_mask, optimizer):
         elif self.task == "classification":
             loss = F.cross_entropy(pred, y.long())
         else:
-            raise NotImplemented(
+            raise NotImplementedError(
                 "Unknown task. Supported tasks: classification, regression."
             )
diff --git a/examples/pytorch/evolveGCN/train.py b/examples/pytorch/evolveGCN/train.py
index 92484a7a1943..d588841d906a 100644
--- a/examples/pytorch/evolveGCN/train.py
+++ b/examples/pytorch/evolveGCN/train.py
@@ -41,7 +41,7 @@ def train(args, device):
             in_feats=int(g.ndata["feat"].shape[1]), num_layers=args.n_layers
         )
     else:
-        return NotImplementedError("Unsupported model {}".format(args.model))
+        raise NotImplementedError("Unsupported model {}".format(args.model))

     model = model.to(device)
     optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
diff --git a/examples/pytorch/han/utils.py b/examples/pytorch/han/utils.py
index 0b6b6054b25f..250a6abd3c91 100644
--- a/examples/pytorch/han/utils.py
+++ b/examples/pytorch/han/utils.py
@@ -263,7 +263,7 @@ def load_data(dataset, remove_self_loop=False):
     elif dataset == "ACMRaw":
         return load_acm_raw(remove_self_loop)
     else:
-        return NotImplementedError("Unsupported dataset {}".format(dataset))
+        raise NotImplementedError("Unsupported dataset {}".format(dataset))


 class EarlyStopping(object):
diff --git a/examples/pytorch/jtnn/jtnn/chemutils.py b/examples/pytorch/jtnn/jtnn/chemutils.py
index 88ab8c21850f..50d797f5f472 100644
--- a/examples/pytorch/jtnn/jtnn/chemutils.py
+++ b/examples/pytorch/jtnn/jtnn/chemutils.py
@@ -314,7 +314,11 @@ def enum_attach_nx(ctr_mol, nei_node, amap, singletons):


 # Try rings first: Speed-Up
-def enum_assemble_nx(node, neighbors, prev_nodes=[], prev_amap=[]):
+def enum_assemble_nx(node, neighbors, prev_nodes=None, prev_amap=None):
+    if prev_nodes is None:
+        prev_nodes = []
+    if prev_amap is None:
+        prev_amap = []
     all_attach_confs = []
     singletons = [
         nei_node["nid"]
diff --git a/examples/pytorch/jtnn/jtnn/jtnn_dec.py b/examples/pytorch/jtnn/jtnn/jtnn_dec.py
index 489ff71035c9..ee99782d12a8 100644
--- a/examples/pytorch/jtnn/jtnn/jtnn_dec.py
+++ b/examples/pytorch/jtnn/jtnn/jtnn_dec.py
@@ -80,7 +80,9 @@ def can_assemble(mol_tree, u, v_node_dict):
     return len(cands) > 0


-def create_node_dict(smiles, clique=[]):
+def create_node_dict(smiles, clique=None):
+    if clique is None:
+        clique = []
     return dict(
         smiles=smiles,
         mol=get_mol(smiles),