[CP] 4.13.1 #11081

Merged (6 commits, Jul 23, 2024)
Changes from all commits
12 changes: 12 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,15 @@
# 4.13.1 Sosta (2024-07-18)

## Features

### Hosts
* Katello should be able to handle subscription-manager environments --set ([#37618](https://projects.theforeman.org/issues/37618), [799e0996](https://github.com/Katello/katello.git/commit/799e09960512f9787d1251ab40089f50324097e6))

## Bug Fixes

### Repositories
* Migrate sha1 repos only at the next edit time ([#37609](https://projects.theforeman.org/issues/37609), [2072d3fd](https://github.com/Katello/katello.git/commit/2072d3fd73260858983a37b98ddf43dcd46fcf44))
* Get rid of unmaintained anemone ([#37159](https://projects.theforeman.org/issues/37159), [e55b8d1d](https://github.com/Katello/katello.git/commit/e55b8d1ddc2aee494d099af56eda81ed7ec33e24))
# 4.13.0 Sosta (2024-06-26)

## Features
@@ -259,6 +259,14 @@ def server_status
def facts
User.current = User.anonymous_admin
@host.update_candlepin_associations(rhsm_params)
if params[:environments]
new_envs = params[:environments].map do |env|
get_content_view_environment("cp_id", env['id'])
end
new_envs.compact!
Rails.logger.debug "Setting new content view environments for host #{@host.to_label}: #{new_envs.map(&:label)}"
@host.content_facet.content_view_environments = new_envs
end
update_host_registered_through(@host, request.headers)
@host.refresh_statuses([::Katello::RhelLifecycleStatus])
render :json => {:content => _("Facts successfully updated.")}, :status => :ok
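The hunk above extends the RHSM facts endpoint (presumably Katello's Candlepin proxies controller, given the `rhsm_params` and `update_candlepin_associations` calls) so that a `subscription-manager environments --set` check-in can reassign a host's content view environments. The following is a self-contained sketch of the mapping the new branch performs; the lookup table and Candlepin IDs are invented for illustration and are not from the PR.

```ruby
# Self-contained sketch (not Katello code): simulates how the new branch maps
# the subscription-manager payload onto content view environments.
CVE = Struct.new(:cp_id, :label)

# Hypothetical content view environments known to Katello, keyed by Candlepin ID.
known_environments = [
  CVE.new('1001', 'Library/cv-rhel9'),
  CVE.new('2002', 'Dev/cv-rhel9'),
]

# Hypothetical payload shape: params[:environments] is a list of {'id' => <cp_id>} hashes.
params = { environments: [{ 'id' => '1001' }, { 'id' => '9999' }] } # '9999' is unknown

# Mirrors the controller: resolve each Candlepin ID, drop misses via compact!,
# then the host's assignment would be replaced wholesale.
new_envs = params[:environments].map do |env|
  known_environments.find { |cve| cve.cp_id == env['id'] }
end
new_envs.compact!

puts new_envs.map(&:label).inspect
# => ["Library/cv-rhel9"]
```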
15 changes: 11 additions & 4 deletions app/lib/actions/katello/repository/discover.rb
@@ -31,10 +31,17 @@ def run(event = nil)
(on nil do
unless output[:to_follow].empty?
password = decrypt_field(input[:upstream_password])
repo_discovery = ::Katello::RepoDiscovery.new(input[:url], input[:content_type],
input[:upstream_username], password,
input[:search],
output[:crawled], output[:repo_urls], output[:to_follow])
repo_discovery = ::Katello::RepoDiscovery.class_for(input[:content_type]).new(
input[:url],
output[:crawled],
output[:repo_urls],
output[:to_follow],
{
upstream_username: input[:upstream_username],
upstream_password: password,
search: input[:search]
}
)

repo_discovery.run(output[:to_follow].shift)
suspend { |suspended_action| world.clock.ping suspended_action, 0.001 }
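The action no longer builds `::Katello::RepoDiscovery` from a long positional argument list; it asks `RepoDiscovery.class_for` for the discovery class registered for the content type and passes credentials and the search term as a trailing options hash. A minimal usage sketch follows; it assumes the docker repository type registers the new `Katello::Resources::Discovery::Container` class (consistent with the new file added later in this PR), and the registry URL is hypothetical.

```ruby
# Hedged sketch of the new call shape (only meaningful inside a Katello app).
klass = ::Katello::RepoDiscovery.class_for('docker')
# => Katello::Resources::Discovery::Container, assuming the docker repository
#    type registers it; class_for fails for types without a discovery class.

discovery = klass.new(
  'https://registry.example.com/',  # hypothetical upstream URL
  [],                               # crawled
  [],                               # found repo URLs / names
  [],                               # to_follow queue
  {
    upstream_username: nil,
    upstream_password: nil,
    search: '*'
  }
)

discovery.run(nil)   # Container#run ignores the resume point and queries the registry
discovery.found      # => sorted repository names discovered upstream
```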
4 changes: 4 additions & 0 deletions app/lib/actions/pulp3/repository/create_publication.rb
@@ -14,6 +14,10 @@ def plan(repository, smart_proxy, options)
def invoke_external_task
unless input[:skip_publication_creation]
repository = ::Katello::Repository.find(input[:repository_id])
if repository.root.sha1_checksum?
repository.root.remove_sha1_checksum_type
repository.root.save!
end
output[:response] = repository.backend_service(smart_proxy).with_mirror_adapter.create_publication
end
end
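This hunk implements the "Migrate sha1 repos only at the next edit time" fix from the changelog: rather than migrating every repository up front, a root repository that still requests SHA-1 checksums is cleaned up lazily, right before its next publication is created. Below is a hedged sketch of what the two helpers plausibly do, using a stand-in object; the real `Katello::RootRepository` implementation may differ, and `checksum_type` being a plain attribute is an assumption.

```ruby
# Hedged sketch only -- not the actual Katello implementation. Illustrates the
# intent of sha1_checksum? / remove_sha1_checksum_type on a stand-in object.
RootRepoStub = Struct.new(:checksum_type) do
  # True when the repository is still configured for the deprecated sha1 digest.
  def sha1_checksum?
    checksum_type == 'sha1'
  end

  # Drop the explicit sha1 setting so the backend falls back to its default digest.
  def remove_sha1_checksum_type
    self.checksum_type = nil if sha1_checksum?
  end
end

root = RootRepoStub.new('sha1')
root.remove_sha1_checksum_type if root.sha1_checksum?
puts root.checksum_type.inspect
# => nil
```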
194 changes: 4 additions & 190 deletions app/lib/katello/repo_discovery.rb
@@ -4,203 +4,17 @@ module Katello
class RepoDiscovery
include Katello::Util::HttpProxy

attr_reader :found, :crawled, :to_follow

# rubocop:disable Metrics/ParameterLists
def initialize(url, content_type = 'yum', upstream_username = nil,
upstream_password = nil, search = '*', crawled = [],
found = [], to_follow = [])
@uri = uri(url)
@content_type = content_type
@upstream_username = upstream_username.empty? ? nil : upstream_username
@upstream_password = upstream_password.empty? ? nil : upstream_password
@search = search
@found = found
@crawled = crawled
@to_follow = to_follow
def self.class_for(content_type)
repo_discovery_class = RepositoryTypeManager.find_repository_type(content_type)&.repo_discovery_class
fail _("Content type does not support repo discovery") unless repo_discovery_class
repo_discovery_class
end
# rubocop:enable Metrics/ParameterLists

def uri(url)
#add a / on the end, as directories require it or else
# They will get double slashes on them
url += '/' unless url.ends_with?('/')
URI(url)
end

def run(resume_point)
if @content_type == 'docker'
docker_search
else
if @uri.scheme == 'file'
file_crawl(uri(resume_point))
elsif %w(http https).include?(@uri.scheme)
http_crawl(uri(resume_point))
else
fail _("Unsupported URL protocol %s.") % @uri.scheme
end
end
end

private

def parse_parameter(field_value)
field_value.lstrip!
i = field_value.index(/[ \t=;,]/) || field_value.length
name = field_value.slice!(0, i).downcase(:ascii)
field_value.lstrip!
if field_value.delete_prefix!('=')
field_value.lstrip!
if field_value.delete_prefix!('"')
value = ''
until field_value.empty?
break if field_value.delete_prefix!('"')
field_value.delete_prefix!("\\")
value += field_value.slice!(0, 1) || ''
end
else
i = field_value.index(/[;,]/) || field_value.length
value = field_value.slice!(0, i)
end
end
{name: name, value: value || ''}
end

def parse_parameters(field_value)
seen_rel = false
has_next_rel = false
has_anchor = false
until field_value.empty?
field_value.lstrip!
break if field_value.delete_prefix!(';').nil?
param = parse_parameter(field_value)
case
when param[:name] == 'rel' && !seen_rel
seen_rel = true
has_next_rel = param[:value].downcase(:ascii).split(/[ \t]/).include?('next')
when param[:name] == 'anchor'
has_anchor = true
end
end
{has_next_rel: has_next_rel, has_anchor: has_anchor}
end

def get_next_link(link_header)
# This code mostly follows Appendix B "Algorithms of Parsing Link Header Fields" of RFC 8288
# "Web Linking", <https://www.rfc-editor.org/rfc/rfc8288#appendix-B> (that RFC appears to be
# silent about multiple "next" links, so just use the first one and ignore any additional
# ones, in the general spirit of being lenient):
return nil if link_header.nil?
field_value = link_header.clone
until field_value.empty?
# The following ignores any junk preceding the next <...> link URL:
m = field_value.match(/<(.*)>/)
break unless m
target_string = m[1]
field_value = m.post_match
params = parse_parameters(field_value)
if params[:has_next_rel]
# To keep it simple, ignore a link with an (unlikely) anchor parameter; but the RFC
# mandates that we "MUST NOT process the link without applying the anchor", so just raise
# an exception in that (unlikely) case:
fail "anchor not supported" if params[:has_anchor]
return target_string
end
end
nil
end

def docker_search
request_params = {
method: :get,
headers: { accept: :json },
url: "#{@uri}v1/search?q=#{@search}"
}

request_params[:user] = @upstream_username unless @upstream_username.empty?
request_params[:password] = @upstream_password unless @upstream_password.empty?
request_params[:proxy] = proxy_uri if proxy

begin
results = RestClient::Request.execute(request_params)
JSON.parse(results)['results'].each do |result|
@found << result['name']
end
rescue
# Note: v2 endpoint does not support search
request_params[:url] = "#{@uri}v2/_catalog"
loop do
results = RestClient::Request.execute(request_params)
JSON.parse(results)['repositories'].each do |result|
@found << result
end
next_uri = get_next_link(results.headers[:link])
break if next_uri.nil?
request_params[:url] = URI(request_params[:url]).merge(next_uri).to_s
end
end
@found.sort!
end

def anemone_proxy_details
details = {}

if proxy
details[:proxy_host] = proxy_host
details[:proxy_port] = proxy_port
details[:proxy_user] = proxy.username
details[:proxy_password] = proxy.password
end

details
end

def http_crawl(resume_point)
resume_point_uri = URI(resume_point)
resume_point_uri.user = @upstream_username if @upstream_username
resume_point_uri.password = @upstream_password if @upstream_password

Anemone.crawl(resume_point_uri, anemone_proxy_details) do |anemone|
anemone.focus_crawl do |page|
@crawled << page.url.path

page.links.each do |link|
if link.path.ends_with?('/repodata/')
page_url = page.url.clone
page_url.user = nil
page_url.password = nil
@found << page_url.to_s
else
@to_follow << link.to_s if should_follow?(link.path)
end
end
page.discard_doc! #saves memory, doc not needed
[]
end
end
end

def file_crawl(resume_point)
if resume_point.path.ends_with?('/repodata/')
found_path = Pathname(resume_point.path).parent.to_s
@found << "file://#{found_path}"
end
if resume_point.path == @uri.path
Dir.glob("#{@uri.path}**/").each { |path| @to_follow << path }
@to_follow.shift
end
@crawled << resume_point.path
end

def should_follow?(path)
#Verify:
# * link's path starts with the base url
# * link hasn't already been crawled
# * link ends with '/' so it should be a directory
# * link doesn't end with '/Packages/', as this increases
# processing time and memory usage considerably
return path.starts_with?(@uri.path) && [email protected]?(path) &&
path.ends_with?('/') && !path.ends_with?('/Packages/')
end
end
end
127 changes: 127 additions & 0 deletions app/lib/katello/resources/discovery/container.rb
@@ -0,0 +1,127 @@
module Katello
module Resources
module Discovery
class Container < RepoDiscovery
attr_reader :found, :crawled, :to_follow
def initialize(url, crawled = [], found = [], to_follow = [],
upstream_credentials_and_search = {
upstream_username: nil,
upstream_password: nil,
search: '*'
})
@uri = uri(url)
@upstream_username = upstream_credentials_and_search[:upstream_username].presence
@upstream_password = upstream_credentials_and_search[:upstream_password].presence
@search = upstream_credentials_and_search.fetch(:search, '*')
@found = found
@crawled = crawled
@to_follow = to_follow
end

def run(_resume_point)
docker_search
end

private

def parse_parameter(field_value)
field_value.lstrip!
i = field_value.index(/[ \t=;,]/) || field_value.length
name = field_value.slice!(0, i).downcase(:ascii)
field_value.lstrip!
if field_value.delete_prefix!('=')
field_value.lstrip!
if field_value.delete_prefix!('"')
value = ''
until field_value.empty?
break if field_value.delete_prefix!('"')
field_value.delete_prefix!("\\")
value += field_value.slice!(0, 1) || ''
end
else
i = field_value.index(/[;,]/) || field_value.length
value = field_value.slice!(0, i)
end
end
{name: name, value: value || ''}
end

def parse_parameters(field_value)
seen_rel = false
has_next_rel = false
has_anchor = false
until field_value.empty?
field_value.lstrip!
break if field_value.delete_prefix!(';').nil?
param = parse_parameter(field_value)
case
when param[:name] == 'rel' && !seen_rel
seen_rel = true
has_next_rel = param[:value].downcase(:ascii).split(/[ \t]/).include?('next')
when param[:name] == 'anchor'
has_anchor = true
end
end
{has_next_rel: has_next_rel, has_anchor: has_anchor}
end

def get_next_link(link_header)
# This code mostly follows Appendix B "Algorithms of Parsing Link Header Fields" of RFC 8288
# "Web Linking", <https://www.rfc-editor.org/rfc/rfc8288#appendix-B> (that RFC appears to be
# silent about multiple "next" links, so just use the first one and ignore any additional
# ones, in the general spirit of being lenient):
return nil if link_header.nil?
field_value = link_header.clone
until field_value.empty?
# The following ignores any junk preceding the next <...> link URL:
m = field_value.match(/<(.*)>/)
break unless m
target_string = m[1]
field_value = m.post_match
params = parse_parameters(field_value)
if params[:has_next_rel]
# To keep it simple, ignore a link with an (unlikely) anchor parameter; but the RFC
# mandates that we "MUST NOT process the link without applying the anchor", so just raise
# an exception in that (unlikely) case:
fail "anchor not supported" if params[:has_anchor]
return target_string
end
end
nil
end

def docker_search
request_params = {
method: :get,
headers: { accept: :json },
url: "#{@uri}v1/search?q=#{@search}"
}

request_params[:user] = @upstream_username if @upstream_username
request_params[:password] = @upstream_password if @upstream_password
request_params[:proxy] = proxy_uri if proxy

begin
results = RestClient::Request.execute(request_params)
JSON.parse(results)['results'].each do |result|
@found << result['name']
end
rescue
# Note: v2 endpoint does not support search
request_params[:url] = "#{@uri}v2/_catalog"
loop do
results = RestClient::Request.execute(request_params)
JSON.parse(results)['repositories'].each do |result|
@found << result
end
next_uri = get_next_link(results.headers[:link])
break if next_uri.nil?
request_params[:url] = URI(request_params[:url]).merge(next_uri).to_s
end
end
@found.sort!
end
end
end
end
end
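When the v1 search fails, the registry search falls back to the v2 `_catalog` endpoint, which paginates via RFC 8288 `Link` headers; `get_next_link` extracts the `rel="next"` target and the loop merges it onto the current request URL. Below is a small standalone illustration of that merge step with a made-up header value (`get_next_link` is private, so the regex here only mimics its observable result for a simple header with no anchor parameter).

```ruby
require 'uri'

# Made-up Link header of the kind a v2 registry returns for /v2/_catalog:
link_header = '</v2/_catalog?last=library%2Fubuntu&n=100>; rel="next"'

# For a simple header like this, get_next_link returns the text inside <...>
# because its parameters include rel="next" and no anchor:
next_path = link_header[/<(.*)>/, 1]
# => "/v2/_catalog?last=library%2Fubuntu&n=100"

# The docker_search loop then resolves that target against the current catalog URL
# and repeats until no further "next" link is returned:
current = URI('https://registry.example.com/v2/_catalog')
puts current.merge(next_path)
# => https://registry.example.com/v2/_catalog?last=library%2Fubuntu&n=100
```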