Merge pull request #50 from block/myron/fix-steep
Upgrade Steep to 1.9 and address new type check failures.
myronmarston authored Dec 10, 2024
2 parents d9c0bb5 + 76ee6c4 commit 1adf570
Showing 23 changed files with 50 additions and 39 deletions.
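
Most of the changes below address one new strictness in Steep 1.9: `Hash#[]` and `Array#[]` lookups are treated as possibly returning `nil`, since the key or index may be absent. The diff uses two remedies throughout: switching to `fetch`, which raises (`KeyError` for hashes, `IndexError` for arrays) instead of returning `nil` and therefore type-checks as non-nil, and adding inline `# : Type` assertions (or block-style `# @type var` annotations) where the surrounding code guarantees presence. A minimal sketch of the `fetch` remedy, using hypothetical names rather than code from this repo:

    # Looking up a format with #[] types as ::String?, so passing the result
    # straight to strftime fails under Steep. #fetch raises KeyError for an
    # unknown key and so types as a plain ::String.
    FORMATS_BY_FREQUENCY = {yearly: "%Y", monthly: "%Y-%m"}

    def suffix_for(frequency, timestamp)
      timestamp.strftime(FORMATS_BY_FREQUENCY.fetch(frequency))
    end

    puts suffix_for(:monthly, Time.now) # => e.g. "2024-12"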
========================================

@@ -185,7 +185,7 @@ def rollover_index_suffix_for_record(record, timestamp_field_path:)
     return matching_custom_range.index_name_suffix
   end

-  timestamp_value.strftime(ROLLOVER_SUFFIX_FORMATS_BY_FREQUENCY[frequency])
+  timestamp_value.strftime(ROLLOVER_SUFFIX_FORMATS_BY_FREQUENCY.fetch(frequency))
 end

 def concrete_rollover_index_for(index_name, setting_overrides, time_set = nil)
========================================

@@ -154,7 +154,7 @@ def unmerge_response(response_from_merged_query, original_query)
   # If there are no aggregations, there's nothing to unmerge--just return it as is.
   return response_from_merged_query unless (aggs = response_from_merged_query["aggregations"])

-  prefix = @unique_prefix_by_query[original_query]
+  prefix = @unique_prefix_by_query[original_query] # : ::String
   agg_names = original_query.aggregations.keys.map { |name| "#{prefix}#{name}" }.to_set

   filtered_aggs = aggs
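
The trailing `# : ::String` above is a Steep inline type assertion: it tells the checker to treat the lookup result as `::String` rather than the inferred `::String?`, relying on the invariant that every query passed in was previously registered with a prefix. (The assertion is not checked at runtime; a wrong one simply lets the `nil` flow onward.) A small sketch with hypothetical names:

    prefixes_by_query = {"q1" => "p1_"}

    # Without the assertion, Steep infers ::String? for the lookup and flags
    # the String method call below.
    prefix = prefixes_by_query["q1"] # : ::String
    puts prefix.length # => 3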
========================================

@@ -26,7 +26,7 @@ def initialize(schema_element_names:)
 # vs GraphQL.
 def translate_filter_args(field:, args:)
   return nil unless (filter_hash = args[filter_arg_name])
-  filter_type = field.schema.type_from(field.graphql_field.arguments[filter_arg_name].type)
+  filter_type = field.schema.type_from(field.graphql_field.arguments.fetch(filter_arg_name).type)
   convert(filter_type, filter_hash)
 end

========================================

@@ -197,7 +197,7 @@ def normalized_headers
 end

 def content_type
-  @content_type ||= normalized_headers["CONTENT-TYPE"]
+  normalized_headers["CONTENT-TYPE"]
 end

 def self.normalize_header_name(header)
========================================

@@ -25,7 +25,7 @@ def initialize(datastore_router, query_tracker)
 def fetch(queries)
   responses_by_query = @datastore_router.msearch(queries, query_tracker: @query_tracker)
   @query_tracker.record_datastore_queries_for_single_request(queries)
-  queries.map { |q| responses_by_query[q] }
+  queries.map { |q| responses_by_query.fetch(q) }
 end

 def self.execute_many(queries, for_context:)
========================================

@@ -84,8 +84,7 @@ module ElasticGraph
   @normalized_headers: ::Hash[::String, ::String]?
   def normalized_headers: () -> ::Hash[::String, ::String]

-  @content_type: ::String?
-  def content_type: () -> ::String
+  def content_type: () -> ::String?

   def self.normalize_header_name: (::String) -> ::String
 end
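
This signature change pairs with the earlier Ruby change that dropped `@content_type ||= ...`: the `CONTENT-TYPE` header may be absent, and `x ||= y` reassigns whenever `x` is `nil` or `false`, so the old code never actually cached a missing header, while its declared `-> ::String` return type was inaccurate. Removing the ivar lets the signature state the honest `::String?`. A sketch of the pitfall, with a hypothetical `slow_lookup`:

    def content_type
      # When slow_lookup returns nil, @content_type stays nil and the lookup
      # runs again on every call -- `x ||= y` behaves like `x || (x = y)`.
      @content_type ||= slow_lookup("CONTENT-TYPE")
    end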
========================================

@@ -169,7 +169,7 @@ def validate_and_normalize_config(config)
   #
   # So below, filter to types that have all of their datastore clusters available for querying.
   available_type_names, unavailable_type_names = valid_type_names.partition do |type_name|
-    @indexed_document_types_by_name[type_name].search_index_definitions.all? do |search_index_definition|
+    @indexed_document_types_by_name.fetch(type_name).search_index_definitions.all? do |search_index_definition|
       @datastore_clients_by_name.key?(search_index_definition.cluster_to_query.to_s)
     end
   end
@@ -190,6 +190,7 @@ def validate_and_normalize_config(config)
     .fields_by_name[check.timestamp_field]

   if field&.type&.unwrap_fully&.name.to_s == "DateTime"
+    # @type var field: GraphQL::Schema::Field
     # Convert the config so that we have a reference to the index field name.
     normalized_data_recency_checks[type] = check.with(timestamp_field: field.name_in_index.to_s)
   else
========================================

@@ -77,10 +77,8 @@ def bulk(operations, refresh: false)
   # Before writing these operations, verify their destination index mapping are consistent.
   validate_mapping_completeness_of!(:accessible_cluster_names_to_index_into, *operations.map(&:destination_index_def).uniq)

-  # @type var ops_by_client: ::Hash[DatastoreCore::_Client, ::Array[_Operation]]
-  ops_by_client = ::Hash.new { |h, k| h[k] = [] }
-  # @type var unsupported_ops: ::Set[_Operation]
-  unsupported_ops = ::Set.new
+  ops_by_client = ::Hash.new { |h, k| h[k] = [] } # : ::Hash[DatastoreCore::_Client, ::Array[_Operation]]
+  unsupported_ops = ::Set.new # : ::Set[_Operation]

   operations.reject { |op| op.to_datastore_bulk.empty? }.each do |op|
     # Note: this intentionally does not use `accessible_cluster_names_to_index_into`.
@@ -90,7 +88,8 @@

     cluster_names.each do |cluster_name|
       if (client = @datastore_clients_by_name[cluster_name])
-        ops_by_client[client] << op
+        ops = ops_by_client[client] # : ::Array[::ElasticGraph::Indexer::_Operation]
+        ops << op
       else
         unsupported_ops << op
       end
@@ -293,8 +292,7 @@ def source_event_versions_in_index(operations)
   if failures.empty?
     client_names_and_results.each_with_object(_ = {}) do |(client_name, _success_or_failure, results), accum|
       results.each do |op, version|
-        accum[op] ||= _ = {}
-        accum[op][client_name] = version
+        (accum[op] ||= {})[client_name] = version
       end
     end
   else
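
Two idioms in this file deserve a note. `::Hash.new { |h, k| h[k] = [] }` guarantees at runtime that `ops_by_client[client]` is a real array, but the type checker does not account for default blocks, hence the local annotation on the lookup. And `(accum[op] ||= {})[client_name] = version` works because `||=` evaluates to the assigned (non-nil) inner hash, which can be indexed in the same expression. A runnable sketch of the latter, with made-up keys:

    accum = {}

    # `accum[op] ||= {}` returns the existing or newly-created inner hash,
    # so the outer expression can write into it in one step.
    (accum["op-1"] ||= {})["main"] = 7
    (accum["op-1"] ||= {})["replica"] = 7

    p accum # => {"op-1" => {"main" => 7, "replica" => 7}}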
========================================

@@ -96,7 +96,7 @@ def select_json_schema_version(event)
 end

 def validator(type, selected_json_schema_version)
-  factory = validator_factories_by_version[selected_json_schema_version]
+  factory = validator_factories_by_version[selected_json_schema_version] # : JSONSchema::ValidatorFactory
   factory.validator_for(type)
 end

========================================

@@ -31,7 +31,7 @@ def initialize(schema_artifacts)

 # Gets the `RecordPreparer` for the given JSON schema version.
 def for_json_schema_version(json_schema_version)
-  @preparers_by_json_schema_version[json_schema_version]
+  @preparers_by_json_schema_version[json_schema_version] # : RecordPreparer
 end

 # Gets the `RecordPreparer` for the latest JSON schema version. Intended primarily
========================================

@@ -130,7 +130,8 @@ def get_lowest_node_free_storage_in_mb(cluster_name)
     ]
   })

-  metric_response.metric_data_results.first.values.first
+  results = metric_response.metric_data_results.first # : ::Aws::CloudWatch::Types::MetricDataResult
+  results.values.first.to_f
 end

 def get_queue_attributes(queue_urls)
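
Here `Array#first` is the nilable lookup (it returns `nil` for an empty array, and is typed accordingly), so the annotation asserts the element type, and the added `.to_f` converts a possibly-`nil` inner value to a `Float` (`nil.to_f` is `0.0`), keeping the method's return type non-nilable. For instance:

    [].first           # => nil, which is why #first types as Elem?
    nil.to_f           # => 0.0
    [3.5].first.to_f   # => 3.5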
========================================

@@ -55,7 +55,7 @@ def initialize(schema:, sanitize_pii:)
 # @param type_name [String] name of an ElasticGraph type
 # @return [Validator]
 def validator_for(type_name)
-  @validators_by_type_name[type_name]
+  @validators_by_type_name[type_name] # : Validator
 end

 # Returns a new factory configured to disallow unknown properties. By default, JSON schema
========================================

@@ -60,8 +60,8 @@ def variables_errors_for(operation_name, old_dumped_variables, new_dumped_variables)
     return [{"message" => "No dumped variables for this operation exist. Correct by running: `#{rake_task}`"}]
   end

-  old_op_vars = old_dumped_variables[operation_name]
-  new_op_vars = new_dumped_variables[operation_name]
+  old_op_vars = old_dumped_variables.fetch(operation_name)
+  new_op_vars = new_dumped_variables.fetch(operation_name)

   if old_op_vars == new_op_vars
     # The previously dumped variables are up-to-date. No errors in this case.
========================================

@@ -32,12 +32,12 @@ def detect_incompatibilities(old, new, path, entry_type)
   end

   incompatible_commonalities = commonalities.flat_map do |name|
-    incompatibilities_for("#{path}#{name}", normalize_type_info(old[name]), normalize_type_info(new[name]))
+    incompatibilities_for("#{path}#{name}", normalize_type_info(old.fetch(name)), normalize_type_info(new.fetch(name)))
   end

   incompatible_additions = additions.filter_map do |name|
     # Additions are only incompatible if it's required (non-nullable).
-    _ = if normalize_type_info(new[name]).fetch("type").end_with?("!")
+    _ = if normalize_type_info(new.fetch(name)).fetch("type").end_with?("!")
       Incompatibility.new("#{path}#{name}", "new required #{entry_type}")
     end
   end
========================================

@@ -47,7 +47,7 @@ def json_schemas_for(version)
     "Available versions: #{available_json_schema_versions.sort.join(", ")}."
   end

-  json_schemas_by_version[version]
+  json_schemas_by_version[version] # : ::Hash[::String, untyped]
 end

 def available_json_schema_versions
========================================

@@ -117,7 +117,7 @@ def signature_code_for(object, method_name)
   # @type var file_name: ::String?
   # @type var line_number: ::Integer?
   file_name, line_number = object.instance_method(method_name).source_location
-  ::File.read(file_name.to_s).split("\n")[line_number.to_i - 1].strip
+  ::File.read(file_name.to_s).split("\n").fetch(line_number.to_i - 1).strip
 end
 end
 end
========================================

@@ -50,7 +50,8 @@ def customize_derived_types(*type_names, &customization_block)
     derived_type_customizations_for_all_types << customization_block
   else
     type_names.each do |t|
-      derived_type_customizations_by_name[t.to_s] << customization_block
+      derived_type_customizations = derived_type_customizations_by_name[t.to_s] # : ::Array[^(::ElasticGraph::SchemaDefinition::_Type) -> void]
+      derived_type_customizations << customization_block
     end
   end
 end
@@ -75,21 +76,23 @@ def customize_derived_types(*type_names, &customization_block)
 # end
 # end
 def customize_derived_type_fields(type_name, *field_names, &customization_block)
-  customizations_by_field = derived_field_customizations_by_type_and_field_name[type_name]
+  customizations_by_field = derived_field_customizations_by_type_and_field_name[type_name] # : ::Hash[::String, ::Array[^(::ElasticGraph::SchemaDefinition::SchemaElements::Field) -> void]]

   field_names.each do |field_name|
-    customizations_by_field[field_name] << customization_block
+    customizations = customizations_by_field[field_name] # : ::Array[^(::ElasticGraph::SchemaDefinition::SchemaElements::Field) -> void]
+    customizations << customization_block
   end
 end

 # @private
 def derived_type_customizations_for_type(type)
-  derived_type_customizations_by_name[type.name] + derived_type_customizations_for_all_types
+  derived_type_customizations = derived_type_customizations_by_name[type.name] # : ::Array[^(::ElasticGraph::SchemaDefinition::_Type) -> void]
+  derived_type_customizations + derived_type_customizations_for_all_types
 end

 # @private
 def derived_field_customizations_by_name_for_type(type)
-  derived_field_customizations_by_type_and_field_name[type.name]
+  derived_field_customizations_by_type_and_field_name[type.name] # : ::Hash[::String, ::Array[^(SchemaElements::Field) -> void]]
 end

 # @private
========================================

@@ -75,8 +75,9 @@ def merge_fields_by_name_from_subtypes
   fields_by_name.merge(yield subtype) do |field_name, def1, def2|
     if (def1.name_in_index == def2.name_in_index && def1.resolve_mapping != def2.resolve_mapping) || (def1.type.unwrap_non_null != def2.type.unwrap_non_null)
       def_strings = resolved_subtypes.each_with_object([]) do |st, defs|
-        field = st.graphql_fields_by_name[field_name]
-        defs << "on #{st.name}:\n#{field.to_sdl.strip} mapping: #{field.resolve_mapping.inspect}" if st.graphql_fields_by_name.key?(field_name)
+        if (field = st.graphql_fields_by_name[field_name])
+          defs << "on #{st.name}:\n#{field.to_sdl.strip} mapping: #{field.resolve_mapping.inspect}"
+        end
       end

       raise Errors::SchemaError,
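
This rewrite swaps a `key?` guard for an assignment in the condition. Steep does not connect `hash.key?(k)` to a separate `hash[k]` lookup, so the looked-up value stays nilable under the old guard; binding it in the `if` narrows the variable for the whole branch. A sketch with a hypothetical hash:

    fields_by_name = {"id" => "id: ID!"}

    # This does not type-check: fields_by_name["id"] is ::String? even
    # under the key? guard.
    #   puts fields_by_name["id"].strip if fields_by_name.key?("id")

    # Binding the value in the condition narrows it to ::String here:
    if (sdl = fields_by_name["id"])
      puts sdl.strip
    end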
========================================

@@ -43,7 +43,8 @@ module ImplementsInterfaces
 def implements(*interface_names)
   interface_refs = interface_names.map do |interface_name|
     schema_def_state.type_ref(interface_name).to_final_form.tap do |interface_ref|
-      schema_def_state.implementations_by_interface_ref[interface_ref] << self
+      implementations = schema_def_state.implementations_by_interface_ref[interface_ref] # : ::Set[SchemaElements::TypeWithSubfields]
+      implementations << self
     end
   end

========================================

@@ -329,7 +329,7 @@ def check_for_circular_dependencies!

 def recursively_add_referenced_types_to(source_type_ref, references_cache)
   return unless (source_type = source_type_ref.as_object_type)
-  references_set = references_cache[source_type_ref.name]
+  references_set = references_cache[source_type_ref.name] # : ::Set[::String]

   # Recursive references are allowed only when its a relation, so skip that case.
   source_type.graphql_fields_by_name.values.reject { |f| f.relationship }.each do |field|
@@ -339,7 +339,8 @@ def recursively_add_referenced_types_to(source_type_ref, references_cache)
       recursively_add_referenced_types_to(field_type, references_cache)
     end

-    references_set.merge(references_cache[field_type.name])
+    field_type_references_set = references_cache[field_type.name] # : ::Set[::String]
+    references_set.merge(field_type_references_set)
   end
 end

========================================

@@ -29,7 +29,8 @@ def initialize(overrides_by_type_name = {})

 # Returns the name that should be used for the given `type_name` and `value_name`.
 def name_for(type_name, value_name)
-  @used_value_names_by_type_name[type_name] << value_name
+  used_value_names = @used_value_names_by_type_name[type_name] # : ::Array[::String]
+  used_value_names << value_name
   overrides_by_type_name.dig(type_name, value_name) || value_name
 end

========================================

@@ -139,15 +139,17 @@ def register_deleted_type(type_name, defined_at:, defined_via:)
 end

 def register_renamed_field(type_name, from:, to:, defined_at:, defined_via:)
-  renamed_fields_by_type_name_and_old_field_name[type_name][from] = factory.new_deprecated_element(
+  renamed_fields_by_old_field_name = renamed_fields_by_type_name_and_old_field_name[type_name] # : ::Hash[::String, SchemaElements::DeprecatedElement]
+  renamed_fields_by_old_field_name[from] = factory.new_deprecated_element(
     to,
     defined_at: defined_at,
     defined_via: defined_via
   )
 end

 def register_deleted_field(type_name, field_name, defined_at:, defined_via:)
-  deleted_fields_by_type_name_and_old_field_name[type_name][field_name] = factory.new_deprecated_element(
+  deleted_fields_by_old_field_name = deleted_fields_by_type_name_and_old_field_name[type_name] # : ::Hash[::String, SchemaElements::DeprecatedElement]
+  deleted_fields_by_old_field_name[field_name] = factory.new_deprecated_element(
     field_name,
     defined_at: defined_at,
     defined_via: defined_via
========================================

gemspec_helper.rb (4 additions, 1 deletion)

@@ -60,7 +60,10 @@ def self.define_elasticgraph_gem(gemspec_file:, category:)

   # Steep is our type checker. Only needed if there's a `sig` directory.
   if ::Dir.exist?(::File.join(gem_dir, "sig"))
-    spec.add_development_dependency "steep", "~> 1.8"
+    # New steep minor versions often introduce stricter type checks that we may initially fail,
+    # so we don't want to automatically upgrade when a new steep minor version is released.
+    # Therefore, we list 3 digits (in contrast to most gems).
+    spec.add_development_dependency "steep", "~> 1.9.0"
   end

   # If the gem has a `spec` directory then it needs our standard set of testing gems.
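
For reference, RubyGems' pessimistic operator floats only the final listed digit, so the three-digit constraint pins Steep to the 1.9.x series, while the old two-digit one would have accepted any 1.x release from 1.8 onward. The semantics can be checked directly:

    req = Gem::Requirement.new("~> 1.9.0") # >= 1.9.0, < 1.10.0
    req.satisfied_by?(Gem::Version.new("1.9.4"))  # => true
    req.satisfied_by?(Gem::Version.new("1.10.0")) # => false -- a new minor needs a deliberate bump

    Gem::Requirement.new("~> 1.8").satisfied_by?(Gem::Version.new("1.9.0")) # => true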
