diff --git a/src/transformers/agents/prompts.py b/src/transformers/agents/prompts.py
index 661df9bd24e7ee..3a867e8dc9bfe0 100644
--- a/src/transformers/agents/prompts.py
+++ b/src/transformers/agents/prompts.py
@@ -123,7 +123,7 @@ def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
 ```
 
 ---
-Above example were using tools that might not exist for you. You only have acces to those Tools:
+Above example were using tools that might not exist for you. You only have access to those Tools:
 <<tool_descriptions>>
 
 Remember to make sure that variables you use are all defined.
@@ -145,7 +145,7 @@ def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
   "action_input": $INPUT
 }
 
-Make sure to have the $INPUT as a dictionnary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
+Make sure to have the $INPUT as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
 
 You should ALWAYS use the following format:
 
@@ -250,7 +250,7 @@ def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
 }
 
 
-Above example were using notional tools that might not exist for you. You only have acces to those tools:
+Above example were using notional tools that might not exist for you. You only have access to those tools:
 <<tool_descriptions>>
 
 Here are the rules you should always follow to solve your task:
diff --git a/src/transformers/agents/python_interpreter.py b/src/transformers/agents/python_interpreter.py
index 39814daa7f5649..04f62a8acfb959 100644
--- a/src/transformers/agents/python_interpreter.py
+++ b/src/transformers/agents/python_interpreter.py
@@ -628,7 +628,7 @@ def evaluate_ast(
 
     Args:
         expression (`ast.AST`):
-            The code to evaluate, as an abastract syntax tree.
+            The code to evaluate, as an abstract syntax tree.
         state (`Dict[str, Any]`):
             A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation
             encounters assignements.
@@ -640,7 +640,7 @@ def evaluate_ast(
             Add more at your own risk!
     """
     if isinstance(expression, ast.Assign):
-        # Assignement -> we evaluate the assignement which should update the state
+        # Assignement -> we evaluate the assignment which should update the state
         # We return the variable assigned as it may be used to determine the final result.
         return evaluate_assign(expression, state, tools)
     elif isinstance(expression, ast.AugAssign):
diff --git a/src/transformers/audio_utils.py b/src/transformers/audio_utils.py
index 9553bc58488f5c..dc51cda1b76d11 100644
--- a/src/transformers/audio_utils.py
+++ b/src/transformers/audio_utils.py
@@ -1074,7 +1074,7 @@ def stft(frames: np.array, windowing_function: np.array, fft_window_size: int =
         frames (`np.array` of dimension `(num_frames, fft_window_size)`):
             A framed audio signal obtained using `audio_utils.fram_wav`.
         windowing_function (`np.array` of dimension `(nb_frequency_bins, nb_mel_filters)`:
-            A array reprensenting the function that will be used to reduces the amplitude of the discontinuities at the
+            A array representing the function that will be used to reduces the amplitude of the discontinuities at the
             boundaries of each frame when computing the STFT. Each frame will be multiplied by the windowing_function.
             For more information on the discontinuities, called *Spectral leakage*, refer to [this tutorial]https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf
diff --git a/src/transformers/cache_utils.py b/src/transformers/cache_utils.py
index 04ba337ef436b3..40d5a343dab9b9 100644
--- a/src/transformers/cache_utils.py
+++ b/src/transformers/cache_utils.py
@@ -214,7 +214,7 @@ class QuantizedCacheConfig(CacheConfig):
         compute_dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):
             The defualt dtype used for computations in the model. Keys and Values will be cast to this dtype after dequantization.
         device (`str`, *optional*, defaults to `"cpu"`):
-            Device on which to peform computations, should be same as the model's device.
+            Device on which to perform computations, should be same as the model's device.
     """
 
     def __init__(
diff --git a/src/transformers/generation/flax_logits_process.py b/src/transformers/generation/flax_logits_process.py
index 84b5a38d5de4da..9b2ab5fb1afa47 100644
--- a/src/transformers/generation/flax_logits_process.py
+++ b/src/transformers/generation/flax_logits_process.py
@@ -476,7 +476,7 @@ def __init__(self, ngram_size: int):
     def get_previous_ngrams(self, input_ids: jnp.ndarray, vocab_size: int, cur_len: int):
         """
         get a matrix of size (batch_size,) + (vocab_size,)*n (for n-grams) that
-        represent the n-grams that occured previously.
+        represent the n-grams that occurred previously.
         The BCOO representation allow to store only the few non-zero entries, instead of the full (huge) matrix
         """
         batch_size, seq_len = input_ids.shape