[mojo-lang] Rename inout -> mut and borrowed -> read
Per extensive discussion over on this public thread:
modularml/mojo#3623

We're renaming the `inout` argument convention to simply `mut`, and
renaming `borrowed` to `read`, which can still generally be elided.
This reduces the need to understand references for the basic
conventions that many people work with, while providing a more
strictly correct and consistent model. These words are now "soft"
keywords instead of "hard" keywords as well.

This still maintains support for the `inout` and `borrowed` keywords,
though they will eventually be removed.
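For illustration, here's a minimal before/after sketch of the renamed conventions (not taken from this commit; the `Counter` struct and its methods are hypothetical):

```mojo
struct Counter:
    var count: Int

    fn __init__(out self):
        self.count = 0

    # Previously spelled: fn bump(inout self, by: Int)
    fn bump(mut self, by: Int):
        self.count += by

    # Previously spelled: fn peek(borrowed self) -> Int
    # `read` is the default convention, so it can also be elided:
    # fn peek(self) -> Int
    fn peek(read self) -> Int:
        return self.count
```

Free functions follow the same pattern, e.g. `def read[T: CollectionElement](mut span: Span[Byte, _]) -> T` in `bpe.🔥` below.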

MODULAR_ORIG_COMMIT_REV_ID: e2b41cfb4cb8bb0b2e67ade93d32d7ef8989428e
lattner authored and modularbot committed Dec 17, 2024
1 parent 7995339 commit 6b9fa93
Showing 37 changed files with 113 additions and 113 deletions.
2 changes: 1 addition & 1 deletion examples/graph-api/pipelines/benchmarks/human_eval.🔥
@@ -62,7 +62,7 @@ struct HumanEval:
def get_problems(self) -> PythonObject:
return self._human_eval_module.read_problems()

-def add_sample(inout self, task_id: PythonObject, completion: String):
+def add_sample(mut self, task_id: PythonObject, completion: String):
sample = Python.dict()
sample["task_id"] = task_id
sample["completion"] = PythonObject(completion)
2 changes: 1 addition & 1 deletion examples/graph-api/pipelines/configs/llama.🔥
@@ -55,7 +55,7 @@ struct LlamaConfigRegistry(ConfigRegistry):
var registry: ConfigRegistryDict

def __init__(
-inout self,
+mut self,
additional_pipeline_args: ConfigRegistryDict = ConfigRegistryDict(),
):
"""
4 changes: 2 additions & 2 deletions examples/graph-api/pipelines/llama2/model.🔥
@@ -72,7 +72,7 @@ struct QuantizedLlama2[
fn __init__(out self, owned model: model_type):
self.model = model^

-def build_graph(inout self, name: String) -> Graph:
+def build_graph(mut self, name: String) -> Graph:
params = self.hyperparams()
cache_type = TensorType(
DType.float32,
@@ -188,7 +188,7 @@ struct Llama2[
def __init__(out self, owned model: model_type):
self.model = model^

-def build_graph(inout self, name: String) -> Graph:
+def build_graph(mut self, name: String) -> Graph:
params = self.model.hyperparams()

cache_type = OpaqueType(
14 changes: 7 additions & 7 deletions examples/graph-api/pipelines/llama2/run.🔥
@@ -97,7 +97,7 @@ struct Config:
"Alternatively provide a `--quantization-encoding`"
)

-fn get(inout self, key: String) raises -> OptionValue:
+fn get(mut self, key: String) raises -> OptionValue:
"""Returns an option value for `key` in the underlying config.
Args:
@@ -111,7 +111,7 @@ struct Config:
"""
return self.config[key]

-fn set(inout self, key: String, val: OptionValue):
+fn set(mut self, key: String, val: OptionValue):
"""Sets a new value for a given config key. This will overwrite the old
value if the key is already present.
@@ -146,11 +146,11 @@ def _get_attention_mask(
def _generate_q_text_with_tokenizer[
tokenizer_type: Tokenizer,
](
-inout tokenizer: tokenizer_type,
+mut tokenizer: tokenizer_type,
compiled_model: Model,
params: LlamaHParams,
config: Config,
-inout metrics: Metrics,
+mut metrics: Metrics,
execution_device: Device,
):
host_device = cpu_device()
@@ -228,11 +228,11 @@ def _generate_text_with_tokenizer[
tokenizer_type: Tokenizer,
kv_params: KVCacheStaticParams,
](
-inout tokenizer: tokenizer_type,
+mut tokenizer: tokenizer_type,
compiled_model: Model,
params: LlamaHParams,
config: Config,
-inout metrics: Metrics,
+mut metrics: Metrics,
execution_device: Device,
):
host_device = cpu_device()
@@ -351,7 +351,7 @@ def generate_text(
params: LlamaHParams,
config: Config,
execution_device: Device,
-inout metrics: Metrics,
+mut metrics: Metrics,
):
"""Generated text by applying the compiled model to the provided prompt."""
mojo_tokenizer = BPETokenizer.from_file(config.get("tokenizer-path")[Path])
@@ -62,7 +62,7 @@ struct ArenaLinkedList[ElementType: CollectionElement]:
"""Checks whether the node is still in the list."""
return 0 <= id < len(self._arena) and self.node(id)

-def append(inout self, owned value: ElementType) -> Self.ID:
+def append(mut self, owned value: ElementType) -> Self.ID:
"""Adds a new element to the back of the list."""
id = len(self._arena)
node = Node[ElementType](value^, self._tail, None)
@@ -74,7 +74,7 @@ struct ArenaLinkedList[ElementType: CollectionElement]:
self._arena.append(node)
return id

-def remove(inout self, id: Self.ID):
+def remove(mut self, id: Self.ID):
"""Removes an element from the list."""
debug_assert(bool(self.node(id)), "removing item not in list")
debug_assert(bool(self._head), "removing from empty list")
6 changes: 3 additions & 3 deletions examples/graph-api/pipelines/llama2/tokenizer/bpe.🔥
@@ -45,7 +45,7 @@ struct MergeOption(OrderedElement):
)


-def read[T: CollectionElement](inout span: Span[Byte, _]) -> T:
+def read[T: CollectionElement](mut span: Span[Byte, _]) -> T:
"""Read a binary type out of a byte buffer and increment the pointer."""
value = span.unsafe_ptr().bitcast[T]()[]
span = span[sizeof[T]() :]
@@ -110,13 +110,13 @@ struct BPETokenizer(Tokenizer):
with open(path, "r") as file:
return Self.from_bytes(file.read())

-def add_token(inout self, token: String, score: Float32):
+def add_token(mut self, token: String, score: Float32):
"""Add a token to the vocabulary."""
if token not in self.token_ids:
self.token_ids[token] = len(self.vocab)
self.vocab.append(Token(token, score))

-def decode(inout self, output_tokens: List[Int64]) -> String:
+def decode(mut self, output_tokens: List[Int64]) -> String:
"""Decodes a string by indexing the vocabulary."""
decoded = String()
for token_id in output_tokens:
8 changes: 4 additions & 4 deletions examples/graph-api/pipelines/llama2/tokenizer/max_heap.🔥
@@ -62,12 +62,12 @@ struct MaxHeap[ElementType: OrderedElement, reverse: Bool = False](
"""Checks whether the heap has any elements in it."""
return len(self) != 0

-fn push(inout self, owned elem: ElementType):
+fn push(mut self, owned elem: ElementType):
"""Adds a value to the heap."""
self.heap.append(elem^)
self._bubble_up(len(self.heap) - 1)

-fn pop(inout self) -> ElementType:
+fn pop(mut self) -> ElementType:
"""Removes the top element from the heap and return it."""
debug_assert(bool(self), "heap is empty")

@@ -76,7 +76,7 @@ struct MaxHeap[ElementType: OrderedElement, reverse: Bool = False](
self._sink_down(0)
return top^

-fn _bubble_up(inout self, idx: Int):
+fn _bubble_up(mut self, idx: Int):
if idx == 0:
return

@@ -89,7 +89,7 @@ struct MaxHeap[ElementType: OrderedElement, reverse: Bool = False](
)
self._bubble_up(parent_idx)

-fn _sink_down(inout self, idx: Int):
+fn _sink_down(mut self, idx: Int):
var li = _left_child_idx(idx)
var ri = _right_child_idx(idx)

4 changes: 2 additions & 2 deletions examples/graph-api/pipelines/llama3/kv_cache.🔥
@@ -32,7 +32,7 @@ struct KVCache:
var sequence_length: Int

def __init__(
-inout self,
+mut self,
hp: LlamaHParams,
max_length: Int,
batch_size: Int,
@@ -51,7 +51,7 @@ struct KVCache:

self.sequence_length = 0

-def update(inout self, owned keys: AnyMemory, owned values: AnyMemory):
+def update(mut self, owned keys: AnyMemory, owned values: AnyMemory):
"""Updates the KV Cache with data from new tokens."""
cpu_device = self.keys.device()
keys_tensor = keys^.to_device_tensor().move_to(cpu_device)
4 changes: 2 additions & 2 deletions examples/graph-api/pipelines/llama3/model/llama.🔥
@@ -72,7 +72,7 @@ struct Llama3_NaiveKVCache[encoding: QuantizationEncoding = Float32Encoding]:
def __init__(out self, model_path: Path):
self.model = GGUFFile(model_path)

-def build_graph(inout self, name: String) -> Graph:
+def build_graph(mut self, name: String) -> Graph:
params = self.hyperparams()
alias model_dtype = DType.bfloat16 if encoding.id() == BFloat16Encoding.id() else DType.float32
cache_type = TensorType(
@@ -224,7 +224,7 @@ struct Llama3[
def __init__(out self, model_path: Path):
self.model = GGUFFile(model_path)

-def build_graph(inout self, name: String) -> Graph:
+def build_graph(mut self, name: String) -> Graph:
params = self.hyperparams()
cache_type = OpaqueType(
ContiguousKVCacheCollection[type, kv_params].id()
10 changes: 5 additions & 5 deletions examples/graph-api/pipelines/llama3/run.🔥
@@ -95,7 +95,7 @@ struct Config:
if encoding[String] == "bfloat16":
raise "bfloat16 is not currently supported on ARM"

-fn get(inout self, key: String) raises -> OptionValue:
+fn get(mut self, key: String) raises -> OptionValue:
"""Returns an option value for `key` in the underlying config.
Args:
@@ -109,7 +109,7 @@ struct Config:
"""
return self.config[key]

-fn set(inout self, key: String, val: OptionValue):
+fn set(mut self, key: String, val: OptionValue):
"""Sets a new value for a given config key. This will overwrite the old
value if the key is already present.
@@ -143,7 +143,7 @@ def _get_attention_mask(
def _generation_loop[
type: DType, kv_params: KVCacheStaticParams
](
-inout metrics: Metrics,
+mut metrics: Metrics,
n_layers: Int,
compiled_model: Model,
tokenizer: TikTokenEncoder,
@@ -254,7 +254,7 @@ def generate_text_naive(
compiled_model: Model,
params: LlamaHParams,
config: Config,
-inout metrics: Metrics,
+mut metrics: Metrics,
execution_device: Device,
use_gpu: Bool,
):
@@ -393,7 +393,7 @@ def generate_text[
compiled_model: Model,
params: LlamaHParams,
config: Config,
-inout metrics: Metrics,
+mut metrics: Metrics,
execution_device: Device,
use_gpu: Bool,
):
22 changes: 11 additions & 11 deletions examples/graph-api/pipelines/metrics/metrics.🔥
@@ -52,53 +52,53 @@ struct Metrics:
self.traces.append(Trace[TraceLevel.OP]("PipelineMetric"))
self.traces[0].start()

-def set_tokens_in_prompt(inout self, tokens_in_prompt: Int):
+def set_tokens_in_prompt(mut self, tokens_in_prompt: Int):
"""Provides the count of tokens processed in the prompt."""
self.tokens_in_prompt = tokens_in_prompt

-def begin_timing_startup(inout self):
+def begin_timing_startup(mut self):
"""Begins measurement of the pipeline startup time."""
self.start_startup = monotonic()
self.traces.append(
Trace[TraceLevel.OP]("startup", parent_id=self.traces[0].event_id)
)
self.traces[-1].start()

-def end_timing_startup(inout self):
+def end_timing_startup(mut self):
"""Ends measurement of the pipeline startup time."""
self.end_startup = monotonic()
self.traces[-1].end()
_ = self.traces.pop()

-def begin_timing_prompt(inout self):
+def begin_timing_prompt(mut self):
"""Begins timing from before prompt processing."""
self.start_time_before_prompt = monotonic()

-def begin_timing_warmup(inout self):
+def begin_timing_warmup(mut self):
"""Begins timing from before an optional warmup run."""
if not self.start_startup or self.end_startup:
raise "Error: Warmup should be included within startup time"
self.start_time_before_warmup = monotonic()

-def end_timing_warmup(inout self):
+def end_timing_warmup(mut self):
"""Ends measurement of an optional warmup run."""
if not self.start_startup or self.end_startup:
raise "Error: Warmup should be included within startup time"
self.end_warmup = monotonic()

-def begin_timing_tokenization(inout self):
+def begin_timing_tokenization(mut self):
"""Begins timing from before tokenization."""
if not self.start_time_before_prompt or self.start_time_before_context:
raise "Error: Tokenization should be included within TTFT"
self.start_time_before_tokenization = monotonic()

-def end_timing_tokenization(inout self):
+def end_timing_tokenization(mut self):
"""Ends measurement of tokenization."""
if not self.start_time_before_prompt or self.start_time_before_context:
raise "Error: Tokenization should be included within TTFT"
self.end_tokenization = monotonic()

-def begin_timing_generation(inout self):
+def begin_timing_generation(mut self):
"""Begins timing from the first generated token."""
self.start_time_before_generation = monotonic()
self.traces.append(
@@ -110,7 +110,7 @@ struct Metrics:
)
self.traces[-1].start()

-def new_token(inout self):
+def new_token(mut self):
"""Increments the total tokens generated and corresponding metrics."""
if not self.start_time_before_context:
# If this is the first token, store the current time for reporting
@@ -133,7 +133,7 @@ struct Metrics:
)
self.traces[-1].start()

-def end_timing(inout self):
+def end_timing(mut self):
"""Ends timing token generation."""
self.end_time = monotonic()
for trace in self.traces:
2 changes: 1 addition & 1 deletion examples/graph-api/pipelines/nn/transformer.🔥
@@ -87,7 +87,7 @@ struct Transformer[model_dtype: DType = DType.float32]:
var rope_scaling: Optional[Symbol]

def __init__(
-inout self,
+mut self,
dim: Int,
n_heads: Int,
embedding: Embedding,
@@ -94,7 +94,7 @@ struct TeenyTinyLlama[encoding: QuantizationEncoding]:
# Read Llama hyperparameters from the checkpoint.
self.hyperparams = read_hyperparams_from_dict(self.quantized_params)

-def build(inout self) -> Graph:
+def build(mut self) -> Graph:
"""Build the Llama 2 graph using the quantized weights from checkpoint.
"""
# Set the KV cache and tokens input types.
@@ -52,7 +52,7 @@ def param_key(name: String, layer_idx: Optional[Int] = None) -> String:


def add_hyperparams_to_dict(
-inout tensor_dict: TensorDict, hyperparams: LlamaHParams
+mut tensor_dict: TensorDict, hyperparams: LlamaHParams
):
"""Copies all hyperparameters into a TensorDict for later checkpointing."""
tensor_dict.set(
@@ -108,7 +108,7 @@ struct TeenyTinyLlama[
self.quantized_params = TensorDict()
add_hyperparams_to_dict(self.quantized_params, self.hyperparams)

-def build(inout self) -> Graph:
+def build(mut self) -> Graph:
"""Build the Llama 2 graph, quantizing its weights by construction."""
# Set the KV cache and tokens input types.
params = self.params_file.hyperparams()
4 changes: 2 additions & 2 deletions examples/graph-api/pipelines/replit/bpe_tokenizer/ball.mojo
@@ -62,7 +62,7 @@ struct Ball[T: CollectionElement]:
"""Checks whether the node is still in the list."""
return 0 <= id < len(self._arena) and self._arena[id]

-fn append(inout self, owned value: T) -> Self.ID:
+fn append(mut self, owned value: T) -> Self.ID:
"""Adds a new element to the back of the list."""
var id = len(self._arena)
var node = Node[T](value^, self._tail, None)
@@ -74,7 +74,7 @@ struct Ball[T: CollectionElement]:
self._arena.append(node)
return id

-fn remove(inout self, id: Self.ID):
+fn remove(mut self, id: Self.ID):
"""Removes an element from the list."""
var node = self._arena[id].value()
self._arena[id] = None