DIGITALCRIMINAL / ArchivedUltimaScraper

Scrape content from OnlyFans and Fansly
GNU General Public License v3.0

Only able to download all users' posts #282

Closed: melbandera closed this issue 2 years ago

melbandera commented 2 years ago

Hey there. When I try to download a single model's posts, I do the following:

python3.10 start_ofd.py
Select Site: 0 = onlyfans | 1 = fansly | 2 = starsavn
0
Choose Profiles: 0 = All | 1 = default
1
Auth (V1) Attempt 1/10
Welcome An human | [name]
Choose Subscriptions: 0 = All | 1 = model1 | 2 = model2
1
Scraping Paid Content
Scraping - model1 | 1 / 2
100%|████████████████████████████████████████████| 1/1 [00:00<00:00, 189.15it/s]
Finished processing metadata.
Renaming files.
Scraping - model2 | 2 / 2
100%|███████████████████████████████████████████| 1/1 [00:00<00:00, 1064.00it/s]
Scraping Subscriptions
Scrape Processing
Name: model1
Scrape: 0 = All | 1 = Images | 2 = Videos | 3 = Audios | 4 = Texts
0

At this point, the scraper begins scraping all of my subscribed models, not just the one I selected. Some of my subscriptions have around 50 GB of content. Am I doing something wrong?

.settings/config.json:

{
  "info": {
    "version": 8.0
  },
  "settings": {
    "auto_site_choice": "",
    "export_type": "json",
    "profile_directories": [
      ".profiles"
    ],
    "max_threads": -1,
    "min_drive_space": 0,
    "helpers": {
      "renamer": true,
      "reformat_media": true,
      "downloader": true,
      "delete_empty_directories": false
    },
    "webhooks": {
      "global_webhooks": [],
      "global_status": true,
      "auth_webhook": {
        "succeeded": {
          "webhooks": [],
          "status": null,
          "hide_sensitive_info": true
        },
        "failed": {
          "webhooks": [],
          "status": null,
          "hide_sensitive_info": true
        }
      },
      "download_webhook": {
        "succeeded": {
          "webhooks": [],
          "status": null,
          "hide_sensitive_info": true
        },
        "failed": {
          "webhooks": [],
          "status": null,
          "hide_sensitive_info": true
        }
      }
    },
    "exit_on_completion": false,
    "infinite_loop": true,
    "loop_timeout": 0,
    "dynamic_rules_link": "https://raw.githubusercontent.com/DATAHOARDERS/dynamic-rules/main/onlyfans.json",
    "proxies": [],
    "cert": "",
    "random_string": "64785a5473b311ec855e8a094eea7fb1"
  },
  "supported": {
    "onlyfans": {
      "settings": {
        "auto_profile_choice": [],
        "auto_model_choice": false,
        "auto_media_choice": "",
        "auto_api_choice": true,
        "browser": {
          "auth": true
        },
        "jobs": {
          "scrape": {
            "subscriptions": true,
            "paid_content": true
          },
          "metadata": {
            "posts": true,
            "comments": true
          }
        },
        "download_directories": [
          ".sites"
        ],
        "file_directory_format": "{site_name}/{model_username}/{api_type}/{value}/{media_type}",
        "filename_format": "{filename}.{ext}",
        "metadata_directories": [
          ".sites"
        ],
        "metadata_directory_format": "{site_name}/{model_username}/Metadata",
        "delete_legacy_metadata": false,
        "text_length": 255,
        "video_quality": "source",
        "overwrite_files": false,
        "date_format": "%d-%m-%Y",
        "ignored_keywords": [],
        "ignore_type": "",
        "blacklists": [],
        "webhook": true
      }
    },
    "fansly": {
      "settings": {
        "auto_profile_choice": [],
        "auto_model_choice": false,
        "auto_media_choice": "",
        "auto_api_choice": true,
        "browser": {
          "auth": true
        },
        "jobs": {
          "scrape": {
            "subscriptions": true,
            "paid_content": true
          },
          "metadata": {
            "posts": true,
            "comments": true
          }
        },
        "download_directories": [
          ".sites"
        ],
        "file_directory_format": "{site_name}/{model_username}/{api_type}/{value}/{media_type}",
        "filename_format": "{filename}.{ext}",
        "metadata_directories": [
          ".sites"
        ],
        "metadata_directory_format": "{site_name}/{model_username}/Metadata",
        "delete_legacy_metadata": false,
        "text_length": 255,
        "video_quality": "source",
        "overwrite_files": false,
        "date_format": "%d-%m-%Y",
        "ignored_keywords": [],
        "ignore_type": "",
        "blacklists": [],
        "webhook": true
      }
    },
    "starsavn": {
      "settings": {
        "auto_profile_choice": [],
        "auto_model_choice": false,
        "auto_media_choice": "",
        "auto_api_choice": true,
        "browser": {
          "auth": true
        },
        "jobs": {
          "scrape": {
            "subscriptions": true,
            "paid_content": true
          },
          "metadata": {
            "posts": true,
            "comments": true
          }
        },
        "download_directories": [
          ".sites"
        ],
        "file_directory_format": "{site_name}/{model_username}/{api_type}/{value}/{media_type}",
        "filename_format": "{filename}.{ext}",
        "metadata_directories": [
          ".sites"
        ],
        "metadata_directory_format": "{site_name}/{model_username}/Metadata",
        "delete_legacy_metadata": false,
        "text_length": 255,
        "video_quality": "source",
        "overwrite_files": false,
        "date_format": "%d-%m-%Y",
        "ignored_keywords": [],
        "ignore_type": "",
        "blacklists": [],
        "webhook": true
      }
    }
  }
}

Thanks!

DIGITALCRIMINAL commented 2 years ago

The latest commit should fix this.

rileypollard12 commented 2 years ago

On the latest commit this still happens: I select All, then have to select the content type for every subscription, and only after I've gone through everyone does it start scraping content.

melbandera commented 2 years ago

Thanks @DIGITALCRIMINALS! The issue is indeed fixed on my end.

DIGITALCRIMINAL commented 2 years ago

> On the latest commit this still happens: I select All, then have to select the content type for every subscription, and only after I've gone through everyone does it start scraping content.

That's how it's supposed to work if you don't set any of the auto_* choices in your config.
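
For example (a sketch based on the config posted above; the exact accepted values are an assumption on my part, so check the README for your version), setting the auto_* keys under supported.onlyfans.settings should skip the interactive prompts:

  "auto_model_choice": true,
  "auto_media_choice": "All"

With auto_model_choice set to true, the scraper would behave as if you answered "0 = All" at the "Choose Subscriptions" prompt, and auto_media_choice set to "All" would stand in for the "Scrape: 0 = All" prompt, so it runs straight through without a per-subscription menu. The same keys exist under the fansly and starsavn blocks if you use those sites.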