bmbouter opened 1 year ago
I tried to replicate the workflow, but I eventually ran out of disk space after about 2 hours:
http -a admin:password POST localhost:5001/pulp/api/v3/repositories/gem/gem/ name="repo"
http -a admin:password POST localhost:5001/pulp/api/v3/remotes/gem/gem/ name="rubygems" policy="on_demand" url="https://rubygems.org/"
http -a admin:password POST localhost:5001/pulp/api/v3/repositories/gem/gem/{repo_href}/sync/ remote="/pulp/api/v3/remotes/gem/gem/{rubygems_href}/"
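For anyone re-running this, here is a minimal polling sketch in Python, assuming the same localhost:5001 instance and admin:password credentials as the httpie commands above (`wait_for_task` is a hypothetical helper, not part of any Pulp client):

```python
import time

import requests

BASE = "http://localhost:5001"
AUTH = ("admin", "password")


def wait_for_task(task_href, poll_seconds=5):
    """Poll a Pulp task href until it leaves the waiting/running states."""
    while True:
        task = requests.get(BASE + task_href, auth=AUTH).json()
        if task["state"] not in ("waiting", "running"):
            return task
        time.sleep(poll_seconds)


# Example: task = wait_for_task("/pulp/api/v3/tasks/<task-uuid>/")
```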
The sync request returned the href of a task, which then ran for about 2 hours: downloading metadata completed quickly, no artifacts were downloaded because of the on_demand policy, and the counts of associated content and parsed metadata kept increasing until the container ran out of disk space. Following is the state of the task once it failed:
{
    "child_tasks": [],
    "created_resources": [],
    "error": {
        "description": "could not resize shared memory segment \"/PostgreSQL.1390058266\" to 33554432 bytes: No space left on device\n",
        "traceback": " File \"/src/pulpcore/pulpcore/tasking/pulpcore_worker.py\", line 453, in _perform_task\n result = func(*args, **kwargs)\n File \"/src/pulp_gem/pulp_gem/app/tasks/synchronizing.py\", line 80, in synchronize\n dv.create()\n File \"/src/pulpcore/pulpcore/plugin/stages/declarative_version.py\", line 161, in create\n loop.run_until_complete(pipeline)\n File \"/usr/lib64/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\n return future.result()\n File \"/src/pulpcore/pulpcore/plugin/stages/api.py\", line 225, in create_pipeline\n await asyncio.gather(*futures)\n File \"/src/pulpcore/pulpcore/plugin/stages/api.py\", line 43, in __call__\n await self.run()\n File \"/src/pulpcore/pulpcore/plugin/stages/content_stages.py\", line 326, in run\n await sync_to_async(self.new_version.add_content)(\n File \"/usr/local/lib/python3.8/site-packages/asgiref/sync.py\", line 448, in __call__\n ret = await asyncio.wait_for(future, timeout=None)\n File \"/usr/lib64/python3.8/asyncio/tasks.py\", line 455, in wait_for\n return await fut\n File \"/usr/lib64/python3.8/concurrent/futures/thread.py\", line 57, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/usr/local/lib/python3.8/site-packages/asgiref/sync.py\", line 490, in thread_handler\n return func(*args, **kwargs)\n File \"/src/pulpcore/pulpcore/app/models/repository.py\", line 820, in add_content\n to_add = set(content.exclude(pk__in=self.content).values_list(\"pk\", flat=True))\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/query.py\", line 280, in __iter__\n self._fetch_all()\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/query.py\", line 1324, in _fetch_all\n self._result_cache = list(self._iterable_class(self))\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/query.py\", line 171, in __iter__\n for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py\", line 1130, in results_iter\n results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py\", line 1175, in execute_sql\n cursor.execute(sql, params)\n File \"/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py\", line 66, in execute\n return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\n File \"/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py\", line 75, in _execute_with_wrappers\n return executor(sql, params, many, context)\n File \"/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py\", line 84, in _execute\n return self.cursor.execute(sql, params)\n File \"/usr/local/lib/python3.8/site-packages/django/db/utils.py\", line 90, in __exit__\n raise dj_exc_value.with_traceback(traceback) from exc_value\n File \"/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py\", line 84, in _execute\n return self.cursor.execute(sql, params)\n"
    },
    "finished_at": "2023-03-24T12:37:17.732000Z",
    "logging_cid": "e0c8f2ec0c5e400dba47fe748082e34e",
    "name": "pulp_gem.app.tasks.synchronizing.synchronize",
    "parent_task": null,
    "progress_reports": [
        {
            "code": "",
            "done": 1,
            "message": "Downloading Metadata",
            "state": "completed",
            "suffix": null,
            "total": null
        },
        {
            "code": "sync.downloading.artifacts",
            "done": 0,
            "message": "Downloading Artifacts",
            "state": "canceled",
            "suffix": null,
            "total": null
        },
        {
            "code": "associating.content",
            "done": 615939,
            "message": "Associating Content",
            "state": "failed",
            "suffix": null,
            "total": null
        },
        {
            "code": "",
            "done": 626005,
            "message": "Parsing Metadata",
            "state": "canceled",
            "suffix": null,
            "total": null
        }
    ],
    "pulp_created": "2023-03-24T11:01:47.831656Z",
    "pulp_href": "/pulp/api/v3/tasks/bdd91c42-7c4b-466c-91c1-57ce7a103500/",
    "reserved_resources_record": [
        "/pulp/api/v3/repositories/gem/gem/683ffd68-5038-43b0-9a72-988261826091/",
        "/pulp/api/v3/remotes/gem/gem/851eddfd-c407-48d6-a90a-bf072d486b6c/"
    ],
    "started_at": "2023-03-24T11:01:47.859934Z",
    "state": "failed",
    "task_group": null,
    "worker": "/pulp/api/v3/workers/d3275b2f-c2b4-4bcb-9802-a3ff8e12ae6b/"
}
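For what it's worth, the traceback ends in `RepositoryVersion.add_content()`, where `to_add = set(content.exclude(pk__in=self.content).values_list("pk", flat=True))` runs one anti-join over all ~626k candidate pks; that is the query PostgreSQL was executing when the shared memory resize failed. A hedged sketch of how that check could be batched so each query stays bounded (`pks_to_add` is a hypothetical helper, not pulpcore code; `content` and `existing` stand in for the querysets used in `add_content()`, and the batch size is an arbitrary assumption):

```python
def pks_to_add(content, existing, batch_size=10_000):
    """Compute which pks from `content` are missing from `existing`,
    issuing one bounded query per batch instead of a single huge anti-join.

    Hypothetical illustration; `content` and `existing` are Django querysets.
    """
    to_add = set()
    # Fetching just the pks is a plain scan, not the expensive anti-join.
    candidate_pks = list(content.values_list("pk", flat=True))
    for i in range(0, len(candidate_pks), batch_size):
        batch = candidate_pks[i : i + batch_size]
        # Which of this batch already exist in the repository version?
        present = set(existing.filter(pk__in=batch).values_list("pk", flat=True))
        to_add.update(pk for pk in batch if pk not in present)
    return to_add
```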
I'm unsure if there is a dedicated test that does this or not, but either via automated testing or hand testing, please ensure that an entire mirror with policy=on_demand works for rubygems.org.
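A hedged sketch of what that automated check could look like (not an existing test in pulp_gem; the endpoint paths follow the httpie commands above, while the base URL, credentials, and the `wait_for_task` helper from the earlier sketch are assumptions):

```python
import requests

BASE = "http://localhost:5001"
AUTH = ("admin", "password")


def test_full_on_demand_mirror_of_rubygems():
    # Create the repository and the on_demand remote.
    repo = requests.post(
        BASE + "/pulp/api/v3/repositories/gem/gem/",
        json={"name": "repo"},
        auth=AUTH,
    ).json()
    remote = requests.post(
        BASE + "/pulp/api/v3/remotes/gem/gem/",
        json={"name": "rubygems", "policy": "on_demand", "url": "https://rubygems.org/"},
        auth=AUTH,
    ).json()

    # Kick off the sync; Pulp returns the spawned task's href.
    task_href = requests.post(
        BASE + repo["pulp_href"] + "sync/",
        json={"remote": remote["pulp_href"]},
        auth=AUTH,
    ).json()["task"]

    task = wait_for_task(task_href)  # from the polling sketch above
    assert task["state"] == "completed", task.get("error")
```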