Rapidshare to Megaupload script

aiwarrior

I've made this script and would like to get some input and share it
with the community.
I also have a page with some code I produce in my spare time: http://pneves.net
Thanks

# -*- coding: utf-8 -*-
## I, Paulo Neves, am the owner of this script and I do not allow the copy
## or distribution of this script for commercial purposes without my permission.
## If you have any suggestions please mail me and I will reply and post them
## on the site.
from __future__ import with_statement
from urlparse import urljoin
import urllib2, urllib
import os, sys
import re
import ConfigParser


urls = []
acc_details = 0
first_run = 0


def from_rapidshare(url):
    '''Check if this is a rapidshare link'''
    return (url.startswith("rapidshare.com") or
            url.startswith("www.rapidshare.com") or
            url.startswith("http://rapidshare.com") or
            url.startswith("http://www.rapidshare.com"))


def account_details():
    # Keep the config in APPDATA on Windows, or as a dotfile in $HOME elsewhere
    if os.environ.has_key("APPDATA") and os.path.exists(os.environ["APPDATA"]):
        path = os.environ["APPDATA"] + "\\murs.cfg"
    else:
        path = os.path.expanduser("~") + "/.murs.cfg"

    if not os.path.exists(path):
        print path
        m_user = raw_input("Enter your user name for megaupload: ")
        m_password = raw_input("Enter your password for megaupload: ")
        r_user = raw_input("Enter your user name for rapidshare: ")
        r_password = raw_input("Enter your password for rapidshare: ")
        cfg = ConfigParser.SafeConfigParser()
        cfg.add_section('Acc_Details')
        cfg.set('Acc_Details', 'm_user', m_user)
        cfg.set('Acc_Details', 'm_password', m_password)
        cfg.set('Acc_Details', 'r_user', r_user)
        cfg.set('Acc_Details', 'r_password', r_password)
        with open(path, 'wb') as configfile:
            cfg.write(configfile)

    cfg = ConfigParser.SafeConfigParser()
    cfg.read(path)
    try:
        m_user = cfg.get("Acc_Details", "m_user")
        m_password = cfg.get("Acc_Details", "m_password")
        r_user = cfg.get("Acc_Details", "r_user")
        r_password = cfg.get("Acc_Details", "r_password")
    except ConfigParser.NoSectionError or ConfigParser.NoOptionError:
        print "no section or No Option"
        print
        os.remove(path)
    return (m_user, m_password, r_user, r_password)

def cookie_processor(cookie):
    cookie = cookie.split("; ")
##    cookie = dict( [cookie[x].split("=") for x in xrange( len(cookie) ) ] )
##    if cookie['user'] != None:
    return cookie
##    else:
##        print "Scheme has changed or authentication failed. Last option most likely"
##        sys.exit()
##

def rs_auth(login):
    r_user = login[0]
    r_password = login[1]
    opener = urllib2.build_opener(urllib2.HTTPSHandler())
    urllib2.install_opener(opener)
    cred = urllib.urlencode({"login": r_user, "password": r_password})
    try:
        req = urllib2.urlopen("https://ssl.rapidshare.com/cgi-bin/premiumzone.cgi", cred)
        cookie_rs = cookie_processor(req.headers.get("set-cookie", ""))
    except urllib2.URLError:
        print "Some error with the connection occurred. Try again now or later"
        sys.exit()
    # Returns the page and the processed cookie
    return req.read(), cookie_rs


def mu_auth(login):
    m_user = login[0]
    m_password = login[1]
    cred = urllib.urlencode({"login": m_user, "password": m_password})
    try:
        req = urllib2.urlopen("http://www.megaupload.com", cred)
        cookie_mu = cookie_processor(req.headers.get("set-cookie", ""))

    except:
        print "Connection failed"
        sys.exit()

    # Returns the authenticated cookie; in case future changes specifically
    # ask for the cookie it would be easy to change the code or even extract
    # it from the header itself as a dictionary key
    return cookie_mu

def set_rscookie_in_musite(cookie_mu, cookie_rs):
    # No need to check for an existing cookie because it automatically
    # resets to the new value
    cookie_rs = cookie_rs.split("=", 1)
    header = {"Content-type": "application/x-www-form-urlencoded",
              "Accept": "text/plain", "cookie": cookie_mu}
    params = urllib.urlencode({'domain': "http://rapidshare.com",
                               'cookiename1': cookie_rs[0],
                               'cookievalue1': cookie_rs[1]})
    req = urllib2.Request("http://www.megaupload.com/multifetch/?c=cookies",
                          params, header)
    print header
    print params
    #r = urllib2.urlopen(req)
    return True

acc_details = account_details()
if len(sys.argv) == 1:
    print "Try --help for more information"
    sys.exit("No arguments")


if sys.argv[1] == "-h" or sys.argv[1] == "--help":
    print " > murs http://rapidshare.com/files/FILE1 http://rapidshare.com/files/FILEN"
    print "   Download one or several rapidshare links passed as arguments separated by whitespace\n"
    print " > murs links.txt"
    print "   Download a list of links from a file\n"
    print " > murs account"
    print "   Automatically download the entire contents of your account"
    sys.exit()
## Process the arguments
## AFTER THIS IT'S ALL ROLLING


if sys.argv[1] == "account" or sys.argv[1] == "-a":
#Getting the rapidshare user and password respectivelyfrom the
account details already called
print acc_details[2:]
page = rs_auth( acc_details[2:] )
rex = re.findall('''Adliste\["(........|.)"\]\["filename"\] =
"(.*)"''', page[0])
for retrieved_data in rex:
#as there are two matches the retrieved_data is still a
list
urls.append("http://rapidshare.com/files/" + retrieved_data
[0] + "/" + retrieved_data[1])

elif os.path.exists(sys.argv[1]): # If this is a file
from_file = True
l_file = sys.argv[1]
print "Reading list of links from the file " , l_file
print "\n"
list_f = file(l_file, "r")
urls = list_f.readlines()
list_f.close()
if not from_rapidshare(urls):
sys.exit("Urls in text file were not valid")


elif from_rapidshare(sys.argv[1]): #if argument is one or more urls
for i in range(1, len(sys.argv)):
if from_rapidshare(sys.argv):
urls.append(sys.argv)
else:
print "This is not valid argument" , sys.argv[1]
sys.exit()
urls = []

cookie_mu = mu_auth(acc_details[:2])
print cookie_mu
headers = {"User-Agent": "Mozilla/5.0", "Accept": "text/plain",
           "cookie": cookie_mu[0]}
for url in urls:
    try:
        params = urllib.urlencode({'srcurl': url, 'description': "something"})
        req = urllib2.Request("http://megaupload.com/multifetch/", params, headers)
        r = urllib2.urlopen(req)

    except:
        print "An error occurred while trying to talk to megaupload. Make sure you entered the correct password"
 

MRAB

aiwarrior said:
I've made this script and would like to get some input and share it
with the community.
I also have a page with some code I produce in my spare time: http://pneves.net
Thanks
[snip]

def from_rapidshare(url):
    '''Check if this is a rapidshare link'''
    return (url.startswith("rapidshare.com") or
            url.startswith("www.rapidshare.com") or
            url.startswith("http://rapidshare.com") or
            url.startswith("http://www.rapidshare.com"))
You can shorten:

s.startswith(x) or s.startswith(y) or ...

to:

s.startswith((x, y, ...))
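
Applied to the function above, that would look something like:

def from_rapidshare(url):
    '''Check if this is a rapidshare link'''
    return url.startswith(("rapidshare.com",
                           "www.rapidshare.com",
                           "http://rapidshare.com",
                           "http://www.rapidshare.com"))

str.startswith has accepted a tuple of prefixes since Python 2.5, which
your "from __future__ import with_statement" already requires.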

[snip]
cfg = ConfigParser.SafeConfigParser()
cfg.read(path)
try:
    m_user = cfg.get("Acc_Details", "m_user")
    m_password = cfg.get("Acc_Details", "m_password")
    r_user = cfg.get("Acc_Details", "r_user")
    r_password = cfg.get("Acc_Details", "r_password")
except ConfigParser.NoSectionError or ConfigParser.NoOptionError:

In order to catch multiple exceptions it should be:

except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):

    print "no section or No Option"
    print
    os.remove(path)
return (m_user, m_password, r_user, r_password)

def cookie_processor(cookie):
    cookie = cookie.split("; ")
##    cookie = dict( [cookie[x].split("=") for x in xrange( len(cookie) ) ] )

Or:

##    cookie = dict(x.split("=") for x in cookie)

##    if cookie['user'] != None:

When checking for None use "is" and "is not":

##    if cookie['user'] is not None:

    return cookie
##    else:
##        print "Scheme has changed or authentication failed. Last option most likely"
##        sys.exit()

Raising an exception is better than exiting.

##
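
For example, something along these lines (AuthError is just a name made
up for this sketch):

class AuthError(Exception):
    pass

def cookie_processor(cookie):
    # Parse "k1=v1; k2=v2" into a dict and fail loudly if 'user' is missing
    cookie = dict(x.split("=") for x in cookie.split("; "))
    if cookie.get('user') is None:
        raise AuthError("Scheme has changed or authentication failed")
    return cookie

The caller can then decide whether to exit, retry or just report the problem.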
[snip]
def mu_auth(login):
    m_user = login[0]
    m_password = login[1]
    cred = urllib.urlencode({"login": m_user, "password": m_password})
    try:
        req = urllib2.urlopen("http://www.megaupload.com", cred)
        cookie_mu = cookie_processor(req.headers.get("set-cookie", ""))

    except:

Don't use an empty "except"; be explicit about which exception you want
to catch.
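
For example, catching only what urlopen can actually raise, something like:

try:
    req = urllib2.urlopen("http://www.megaupload.com", cred)
    cookie_mu = cookie_processor(req.headers.get("set-cookie", ""))
except urllib2.URLError, e:
    print "Connection failed:", e
    sys.exit()

A bare except also catches things like KeyboardInterrupt and SystemExit,
which you almost never want to swallow.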

[snip]
if sys.argv[1] == "-h" or sys.argv[1] == "–help":
Or:

if sys.argv[1] in ("-h", "–help"):

[snip]
elif from_rapidshare(sys.argv[1]):  # If argument is one or more urls
    for i in range(1, len(sys.argv)):

Or:

    for arg in sys.argv[1:]:

and so on (a full sketch of the branch follows the quoted code below).

        if from_rapidshare(sys.argv):
            urls.append(sys.argv)
else:
    print "This is not valid argument", sys.argv[1]
    sys.exit()
    urls = []
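
Putting those together, the whole branch might look something like this
(it also adds the index missing from sys.argv in the loop body):

elif from_rapidshare(sys.argv[1]):  # If argument is one or more urls
    for arg in sys.argv[1:]:
        if from_rapidshare(arg):
            urls.append(arg)
else:
    print "This is not valid argument", sys.argv[1]
    sys.exit()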

[snip]
HTH
 
A

aiwarrior

Thanks a lot for your input, I really needed it. I realize these are
minor flaws, but even so they define whether it's good or bad code, and
I really need to improve that. I've already implemented the changes you
suggested, and this one:
cookie = dict(x.split("=") for x in cookie)

for me is just very good and really elegant.
An aspect you didn't mention was the
urls.append("http://rapidshare.com/files/" + retrieved_data[0] + "/" + retrieved_data[1])
I think it's very hackish and crude, but urlparse doesn't seem to accept
more than one argument at a time, and doing it in a loop seems worse
than the current solution. What would you do?
Thanks a lot again
 
 

MRAB

aiwarrior said:
Thanks a lot for your input, I really needed it. I realize these are
minor flaws, but even so they define whether it's good or bad code, and
I really need to improve that. I've already implemented the changes you
suggested, and this one:
cookie = dict(x.split("=") for x in cookie)

for me is just very good and really elegant. An aspect you didn't
mention was the
urls.append("http://rapidshare.com/files/" + retrieved_data[0] +
"/" + retrieved_data[1])
I think it's very hackish and crude, but urlparse doesn't seem to
accept more than one argument at a time, and doing it in a loop seems
worse than the current solution. What would you do?
re.findall() returns a list of tuples, so retrieved_data is a tuple and
the following will work:

urls.append("http://rapidshare.com/files/%s/%s" % retrieved_data)
 
